diff --git a/README.textile b/README.textile
index e6057f022da..720f357406b 100644
--- a/README.textile
+++ b/README.textile
@@ -34,6 +34,10 @@ h2. Getting Started

 First of all, DON'T PANIC. It will take 5 minutes to get the gist of what Elasticsearch is all about.

+h3. Requirements
+
+You need to have a recent version of Java installed. See the "Setup":http://www.elastic.co/guide/en/elasticsearch/reference/current/setup.html#jvm-version page for more information.
+
 h3. Installation

 * "Download":https://www.elastic.co/downloads/elasticsearch and unzip the Elasticsearch official distribution.
diff --git a/bin/service.bat b/bin/service.bat
index 282483c3cb8..a20d6e252ad 100644
--- a/bin/service.bat
+++ b/bin/service.bat
@@ -137,13 +137,11 @@ set JVM_SS=256

 if "%DATA_DIR%" == "" set DATA_DIR=%ES_HOME%\data

-if "%WORK_DIR%" == "" set WORK_DIR=%ES_HOME%
-
 if "%CONF_DIR%" == "" set CONF_DIR=%ES_HOME%\config

 if "%CONF_FILE%" == "" set CONF_FILE=%ES_HOME%\config\elasticsearch.yml

-set ES_PARAMS=-Delasticsearch;-Des.path.home="%ES_HOME%";-Des.default.config="%CONF_FILE%";-Des.default.path.home="%ES_HOME%";-Des.default.path.logs="%LOG_DIR%";-Des.default.path.data="%DATA_DIR%";-Des.default.path.work="%WORK_DIR%";-Des.default.path.conf="%CONF_DIR%"
+set ES_PARAMS=-Delasticsearch;-Des.path.home="%ES_HOME%";-Des.default.config="%CONF_FILE%";-Des.default.path.home="%ES_HOME%";-Des.default.path.logs="%LOG_DIR%";-Des.default.path.data="%DATA_DIR%";-Des.default.path.conf="%CONF_DIR%"

 set JVM_OPTS=%JAVA_OPTS: =;%
diff --git a/config/elasticsearch.yml b/config/elasticsearch.yml
index f1359cd58d0..b3baf765b3a 100644
--- a/config/elasticsearch.yml
+++ b/config/elasticsearch.yml
@@ -1,373 +1,99 @@
-##################### Elasticsearch Configuration Example #####################
-
-# This file contains an overview of various configuration settings,
-# targeted at operations staff. Application developers should
-# consult the guide at .
+# ======================== Elasticsearch Configuration =========================
 #
-# The installation procedure is covered at
-# .
+# NOTE: Elasticsearch comes with reasonable defaults for most settings.
+# Before you set out to tweak and tune the configuration, make sure you
+# understand what you are trying to accomplish and the consequences.
 #
-# Elasticsearch comes with reasonable defaults for most settings,
-# so you can try it out without bothering with configuration.
+# The primary way of configuring a node is via this file. This template lists
+# the most important settings you may want to configure for a production cluster.
 #
-# Most of the time, these defaults are just fine for running a production
-# cluster. If you're fine-tuning your cluster, or wondering about the
-# effect of certain configuration option, please _do ask_ on the
-# mailing list or IRC channel [http://elasticsearch.org/community].
-
-# Any element in the configuration can be replaced with environment variables
-# by placing them in ${...} notation. For example:
+# Please see the documentation for further information on configuration options:
+#
 #
-#node.rack: ${RACK_ENV_VAR}
-
-# For information on supported formats and syntax for the config file, see
-
-
-################################### Cluster ###################################
-
-# Cluster name identifies your cluster for auto-discovery. If you're running
-# multiple clusters on the same network, make sure you're using unique names. 
+# ---------------------------------- Cluster ----------------------------------- # -#cluster.name: elasticsearch - - -#################################### Node ##################################### - -# Node names are generated dynamically on startup, so you're relieved -# from configuring them manually. You can tie this node to a specific name: +# Use a descriptive name for your cluster: # -#node.name: "Franz Kafka" - -# Every node can be configured to allow or deny being eligible as the master, -# and to allow or deny to store the data. +# cluster.name: my-application # -# Allow this node to be eligible as a master node (enabled by default): +# ------------------------------------ Node ------------------------------------ # -#node.master: true +# Use a descriptive name for the node: # -# Allow this node to store data (enabled by default): +# node.name: node-1 # -#node.data: true - -# You can exploit these settings to design advanced cluster topologies. +# Add custom attributes to the node: # -# 1. You want this node to never become a master node, only to hold data. -# This will be the "workhorse" of your cluster. +# node.rack: r1 # -#node.master: false -#node.data: true +# ----------------------------------- Paths ------------------------------------ # -# 2. You want this node to only serve as a master: to not store any data and -# to have free resources. This will be the "coordinator" of your cluster. +# Path to directory where to store the data (separate multiple locations by comma): # -#node.master: true -#node.data: false +# path.data: /path/to/data # -# 3. You want this node to be neither master nor data node, but -# to act as a "search load balancer" (fetching data from nodes, -# aggregating results, etc.) -# -#node.master: false -#node.data: false - -# Use the Cluster Health API [http://localhost:9200/_cluster/health], the -# Node Info API [http://localhost:9200/_nodes] or GUI tools -# such as , -# , -# and -# to inspect the cluster state. - -# A node can have generic attributes associated with it, which can later be used -# for customized shard allocation filtering, or allocation awareness. An attribute -# is a simple key value pair, similar to node.key: value, here is an example: -# -#node.rack: rack314 - -# By default, multiple nodes are allowed to start from the same installation location -# to disable it, set the following: -#node.max_local_storage_nodes: 1 - - -#################################### Index #################################### - -# You can set a number of options (such as shard/replica options, mapping -# or analyzer definitions, translog settings, ...) for indices globally, -# in this file. -# -# Note, that it makes more sense to configure index settings specifically for -# a certain index, either when creating it or by using the index templates API. -# -# See and -# -# for more information. - -# Set the number of shards (splits) of an index (5 by default): -# -#index.number_of_shards: 5 - -# Set the number of replicas (additional copies) of an index (1 by default): -# -#index.number_of_replicas: 1 - -# Note, that for development on a local machine, with small indices, it usually -# makes sense to "disable" the distributed features: -# -#index.number_of_shards: 1 -#index.number_of_replicas: 0 - -# These settings directly affect the performance of index and search operations -# in your cluster. Assuming you have enough machines to hold shards and -# replicas, the rule of thumb is: -# -# 1. 
Having more *shards* enhances the _indexing_ performance and allows to -# _distribute_ a big index across machines. -# 2. Having more *replicas* enhances the _search_ performance and improves the -# cluster _availability_. -# -# The "number_of_shards" is a one-time setting for an index. -# -# The "number_of_replicas" can be increased or decreased anytime, -# by using the Index Update Settings API. -# -# Elasticsearch takes care about load balancing, relocating, gathering the -# results from nodes, etc. Experiment with different settings to fine-tune -# your setup. - -# Use the Index Status API () to inspect -# the index status. - - -#################################### Paths #################################### - -# Path to directory containing configuration (this file and logging.yml): -# -#path.conf: /path/to/conf - -# Path to directory where to store index data allocated for this node. -# -#path.data: /path/to/data -# -# Can optionally include more than one location, causing data to be striped across -# the locations (a la RAID 0) on a file level, favouring locations with most free -# space on creation. For example: -# -#path.data: /path/to/data1,/path/to/data2 - -# Path to temporary files: -# -#path.work: /path/to/work - # Path to log files: # -#path.logs: /path/to/logs - -# Path to where plugins are installed: +# path.logs: /path/to/logs # -#path.plugins: /path/to/plugins - - -#################################### Plugin ################################### - -# If a plugin listed here is not installed for current node, the node will not start. +# ----------------------------------- Memory ----------------------------------- # -#plugin.mandatory: mapper-attachments,lang-groovy - - -################################### Memory #################################### - -# Elasticsearch performs poorly when JVM starts swapping: you should ensure that -# it _never_ swaps. +# Lock the memory on startup: # -# Set this property to true to lock the memory: +# bootstrap.mlockall: true # -#bootstrap.mlockall: true - -# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set -# to the same value, and that the machine has enough memory to allocate -# for Elasticsearch, leaving enough memory for the operating system itself. +# Make sure that the `ES_HEAP_SIZE` environment variable is set to about half the memory +# available on the system and that the owner of the process is allowed to use this limit. # -# You should also make sure that the Elasticsearch process is allowed to lock -# the memory, eg. by using `ulimit -l unlimited`. - - -############################## Network And HTTP ############################### - -# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens -# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node -# communication. (the range means that if the port is busy, it will automatically -# try the next port). - -# Set the bind address specifically (IPv4 or IPv6): +# Elasticsearch performs poorly when the system is swapping the memory. # -#network.bind_host: 192.168.0.1 - -# Set the address other nodes will use to communicate with this node. If not -# set, it is automatically derived. It must point to an actual IP address. 
+# ---------------------------------- Network -----------------------------------
 #
-#network.publish_host: 192.168.0.1
-
-# Set both 'bind_host' and 'publish_host':
+# Set the bind address to a specific IP (IPv4 or IPv6):
 #
-#network.host: 192.168.0.1
-
-# Set a custom port for the node to node communication (9300 by default):
+# network.host: 192.168.0.1
 #
-#transport.tcp.port: 9300
-
-# Enable compression for all communication between nodes (disabled by default):
+# Set a custom port for HTTP:
 #
-#transport.tcp.compress: true
-
-# Set a custom port to listen for HTTP traffic:
+# http.port: 9200
 #
-#http.port: 9200
-
-# Set a custom allowed content length:
+# For more information, see the documentation at:
+#
 #
-#http.max_content_length: 100mb
-
-# Disable HTTP completely:
+# ---------------------------------- Gateway -----------------------------------
 #
-#http.enabled: false
-
-
-################################### Gateway ###################################
-
-# The gateway allows for persisting the cluster state between full cluster
-# restarts. Every change to the state (such as adding an index) will be stored
-# in the gateway, and when the cluster starts up for the first time,
-# it will read its state from the gateway.
-
-# For more information, see
-# .
-
-# Settings below control how and when to start the initial recovery process on
-# a full cluster restart (to reuse as much local data as possible when using shared
-# gateway).
-
-# Allow recovery process after N nodes in a cluster are up:
+# Block initial recovery after a full cluster restart until N nodes are started:
 #
-#gateway.recover_after_nodes: 1
-
-# Set the timeout to initiate the recovery process, once the N nodes
-# from previous setting are up (accepts time value):
+# gateway.recover_after_nodes: 3
 #
-#gateway.recover_after_time: 5m
-
-# Set how many nodes are expected in this cluster. Once these N nodes
-# are up (and recover_after_nodes is met), begin recovery process immediately
-# (without waiting for recover_after_time to expire):
+# For more information, see the documentation at:
+#
 #
-#gateway.expected_nodes: 2
-
-
-############################# Recovery Throttling #############################
-
-# These settings allow to control the process of shards allocation between
-# nodes during initial recovery, replica allocation, rebalancing,
-# or when adding and removing nodes.
-
-# Set the number of concurrent recoveries happening on a node:
+# --------------------------------- Discovery ----------------------------------
 #
-# 1. During the initial recovery
+# Elasticsearch nodes will find each other via multicast, by default.
 #
-#cluster.routing.allocation.node_initial_primaries_recoveries: 4
+# To use unicast discovery, disable multicast discovery:
 #
-# 2. During adding/removing nodes, rebalancing, etc
+# discovery.zen.ping.multicast.enabled: false
 #
-#cluster.routing.allocation.node_concurrent_recoveries: 2
-
-# Set to throttle throughput when recovering (eg. 100mb, by default 20mb):
+# Pass an initial list of hosts to perform discovery when a new node is started:
 #
-#indices.recovery.max_bytes_per_sec: 20mb
-
-# Set to limit the number of open concurrent streams when
-# recovering a shard from a peer:
+# discovery.zen.ping.unicast.hosts: ["host1", "host2"]
 #
-#indices.recovery.concurrent_streams: 5
-
-
-################################## Discovery ##################################
-
-# Discovery infrastructure ensures nodes can be found within a cluster
-# and master node is elected. 
Multicast discovery is the default. - -# Set to ensure a node sees N other master eligible nodes to be considered -# operational within the cluster. This should be set to a quorum/majority of -# the master-eligible nodes in the cluster. +# Prevent the "split brain" by configuring the majority of nodes (total number of nodes / 2 + 1): # -#discovery.zen.minimum_master_nodes: 1 - -# Set the time to wait for ping responses from other nodes when discovering. -# Set this option to a higher value on a slow or congested network -# to minimize discovery failures: +# discovery.zen.minimum_master_nodes: 3 # -#discovery.zen.ping.timeout: 3s - -# For more information, see -# - -# Unicast discovery allows to explicitly control which nodes will be used -# to discover the cluster. It can be used when multicast is not present, -# or to restrict the cluster communication-wise. +# For more information, see the documentation at: +# # -# 1. Disable multicast discovery (enabled by default): +# ---------------------------------- Various ----------------------------------- # -#discovery.zen.ping.multicast.enabled: false +# Disable starting multiple nodes on a single system: # -# 2. Configure an initial list of master nodes in the cluster -# to perform discovery when new nodes (master or data) are started: +# node.max_local_storage_nodes: 1 # -#discovery.zen.ping.unicast.hosts: ["host1", "host2:port"] - -# EC2 discovery allows to use AWS EC2 API in order to perform discovery. +# Require explicit names when deleting indices: # -# You have to install the cloud-aws plugin for enabling the EC2 discovery. -# -# For more information, see -# -# -# See -# for a step-by-step tutorial. - -# GCE discovery allows to use Google Compute Engine API in order to perform discovery. -# -# You have to install the cloud-gce plugin for enabling the GCE discovery. -# -# For more information, see . - -# Azure discovery allows to use Azure API in order to perform discovery. -# -# You have to install the cloud-azure plugin for enabling the Azure discovery. -# -# For more information, see . - -################################## Slow Log ################################## - -# Shard level query and fetch threshold logging. - -#index.search.slowlog.threshold.query.warn: 10s -#index.search.slowlog.threshold.query.info: 5s -#index.search.slowlog.threshold.query.debug: 2s -#index.search.slowlog.threshold.query.trace: 500ms - -#index.search.slowlog.threshold.fetch.warn: 1s -#index.search.slowlog.threshold.fetch.info: 800ms -#index.search.slowlog.threshold.fetch.debug: 500ms -#index.search.slowlog.threshold.fetch.trace: 200ms - -#index.indexing.slowlog.threshold.index.warn: 10s -#index.indexing.slowlog.threshold.index.info: 5s -#index.indexing.slowlog.threshold.index.debug: 2s -#index.indexing.slowlog.threshold.index.trace: 500ms - -################################## GC Logging ################################ - -#monitor.jvm.gc.young.warn: 1000ms -#monitor.jvm.gc.young.info: 700ms -#monitor.jvm.gc.young.debug: 400ms - -#monitor.jvm.gc.old.warn: 10s -#monitor.jvm.gc.old.info: 5s -#monitor.jvm.gc.old.debug: 2s +# action.destructive_requires_name: true diff --git a/dev-tools/build_release.py b/dev-tools/build_release.py index 345aaa5d5dd..ca3223c0a92 100644 --- a/dev-tools/build_release.py +++ b/dev-tools/build_release.py @@ -57,7 +57,9 @@ Once it's done it will print all the remaining steps. 
- Python 3k for script execution - Boto for S3 Upload ($ apt-get install python-boto) - RPM for RPM building ($ apt-get install rpm) - - S3 keys exported via ENV Variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) + - S3 keys exported via ENV variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) + - GPG data exported via ENV variables (GPG_KEY_ID, GPG_PASSPHRASE, optionally GPG_KEYRING) + - S3 target repository via ENV variables (S3_BUCKET_SYNC_TO, optionally S3_BUCKET_SYNC_FROM) """ env = os.environ @@ -246,10 +248,13 @@ def build_release(run_tests=False, dry_run=True, cpus=1, bwc_version=None): print('Running Backwards compatibility tests against version [%s]' % (bwc_version)) run_mvn('clean', 'test -Dtests.filter=@backwards -Dtests.bwc.version=%s -Dtests.bwc=true -Dtests.jvms=1' % bwc_version) run_mvn('clean test-compile -Dforbidden.test.signatures="org.apache.lucene.util.LuceneTestCase\$AwaitsFix @ Please fix all bugs before release"') - run_mvn('clean %s -DskipTests' % (target)) + gpg_args = '-Dgpg.key="%s" -Dgpg.passphrase="%s" -Ddeb.sign=true' % (env.get('GPG_KEY_ID'), env.get('GPG_PASSPHRASE')) + if env.get('GPG_KEYRING'): + gpg_args += ' -Dgpg.keyring="%s"' % env.get('GPG_KEYRING') + run_mvn('clean %s -DskipTests %s' % (target, gpg_args)) success = False try: - run_mvn('-DskipTests rpm:rpm') + run_mvn('-DskipTests rpm:rpm %s' % (gpg_args)) success = True finally: if not success: @@ -502,6 +507,14 @@ def publish_artifacts(artifacts, base='elasticsearch/elasticsearch', dry_run=Tru # requires boto to be installed but it is not available on python3k yet so we use a dedicated tool run('python %s/upload-s3.py --file %s ' % (location, os.path.abspath(artifact))) +def publish_repositories(version, dry_run=True): + if dry_run: + print('Skipping package repository update') + else: + print('Triggering repository update - calling dev-tools/build_repositories.sh %s' % version) + # src_branch is a version like 1.5/1.6/2.0/etc.. 
so we can use this + run('dev-tools/build_repositories.sh %s' % src_branch) + def print_sonatype_notice(): settings = os.path.join(os.path.expanduser('~'), '.m2/settings.xml') if os.path.isfile(settings): @@ -536,6 +549,16 @@ def check_s3_credentials(): if not env.get('AWS_ACCESS_KEY_ID', None) or not env.get('AWS_SECRET_ACCESS_KEY', None): raise RuntimeError('Could not find "AWS_ACCESS_KEY_ID" / "AWS_SECRET_ACCESS_KEY" in the env variables please export in order to upload to S3') +def check_gpg_credentials(): + if not env.get('GPG_KEY_ID', None) or not env.get('GPG_PASSPHRASE', None): + raise RuntimeError('Could not find "GPG_KEY_ID" / "GPG_PASSPHRASE" in the env variables please export in order to sign the packages (also make sure that GPG_KEYRING is set when not in ~/.gnupg)') + +def check_command_exists(name, cmd): + try: + subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError: + raise RuntimeError('Could not run command %s - please make sure it is installed' % (name)) + VERSION_FILE = 'src/main/java/org/elasticsearch/Version.java' POM_FILE = 'pom.xml' @@ -628,9 +651,16 @@ if __name__ == '__main__': if os.path.exists(LOG): raise RuntimeError('please remove old release log %s first' % LOG) + + check_gpg_credentials() + check_command_exists('gpg', 'gpg --version') + check_command_exists('expect', 'expect -v') if not dry_run: check_s3_credentials() + check_command_exists('createrepo', 'createrepo --version') + check_command_exists('s3cmd', 's3cmd --version') + check_command_exists('apt-ftparchive', 'apt-ftparchive --version') print('WARNING: dryrun is set to "false" - this will push and publish the release') input('Press Enter to continue...') @@ -687,6 +717,8 @@ if __name__ == '__main__': merge_tag_push(remote, src_branch, release_version, dry_run) print(' publish artifacts to S3 -- dry_run: %s' % dry_run) publish_artifacts(artifacts_and_checksum, dry_run=dry_run) + print(' Updating package repositories -- dry_run: %s' % dry_run) + publish_repositories(src_branch, dry_run=dry_run) cherry_pick_command = '.' if version_head_hash: cherry_pick_command = ' and cherry-pick the documentation changes: \'git cherry-pick %s\' to the development branch' % (version_head_hash) diff --git a/dev-tools/build_repositories.sh b/dev-tools/build_repositories.sh new file mode 100755 index 00000000000..9bad8ff2c14 --- /dev/null +++ b/dev-tools/build_repositories.sh @@ -0,0 +1,247 @@ +#!/bin/bash + +# Licensed to Elasticsearch under one or more contributor +# license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright +# ownership. Elasticsearch licenses this file to you under +# the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on +# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
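+
+# Example invocation (a sketch; the required environment variables are documented
+# below, and the version argument is the major.minor release line):
+#
+#   GPG_KEY_ID=... GPG_PASSPHRASE=... S3_BUCKET_SYNC_TO=packages.elasticsearch.org \
+#     dev-tools/build_repositories.sh 1.5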
+
+
+# This tool uploads the debian and RPM packages to the specified S3 buckets
+# The packages are signed as well
+# Syncing the existing repository content first is a requirement
+
+set -e
+
+###################
+## environment variables
+##
+## required
+##
+## GPG_PASSPHRASE: Passphrase of your GPG key
+## GPG_KEY_ID: Key id of your GPG key
+## AWS_ACCESS_KEY_ID: AWS access key id
+## AWS_SECRET_ACCESS_KEY: AWS secret access key
+## S3_BUCKET_SYNC_TO: Bucket to write packages to, should be set to packages.elasticsearch.org for a regular release
+##
+##
+## optional
+##
+## S3_BUCKET_SYNC_FROM: Bucket to read packages from, defaults to packages.elasticsearch.org
+## KEEP_DIRECTORIES: Keeps all the generated directory structures for debugging
+## GPG_KEYRING: Configures the GPG keyring home, defaults to ~/.gnupg/
+##
+###################
+
+
+
+###################
+## configuration
+###################
+
+# No trailing slashes!
+if [ -z $S3_BUCKET_SYNC_FROM ] ; then
+  S3_BUCKET_SYNC_FROM="packages.elasticsearch.org"
+fi
+if [ ! -z $GPG_KEYRING ] ; then
+  GPG_HOMEDIR="--homedir ${GPG_KEYRING}"
+fi
+
+###################
+## parameters
+###################
+
+# Must be major and minor version, i.e. 1.5 instead of 1.5.0
+version=$1
+
+###################
+## prerequisites
+###################
+
+if [ "$#" != "1" ] || [ "x$1" == "x-h" ] || [ "x$1" == "x--help" ] ; then
+  echo "Usage: $0 version"
+  echo
+  echo "  version: The elasticsearch major and minor version, i.e. 1.5"
+  exit
+fi
+
+echo "Checking for correct environment"
+
+error=""
+
+if [ -z "$GPG_PASSPHRASE" ] ; then
+  echo "Environment variable GPG_PASSPHRASE is not set"
+  error="true"
+fi
+
+if [ -z "$S3_BUCKET_SYNC_TO" ] ; then
+  echo "Environment variable S3_BUCKET_SYNC_TO is not set"
+  error="true"
+fi
+
+if [ -z "$GPG_KEY_ID" ] ; then
+  echo "Environment variable GPG_KEY_ID is not set"
+  error="true"
+fi
+
+if [ -z "$AWS_ACCESS_KEY_ID" ] ; then
+  echo "Environment variable AWS_ACCESS_KEY_ID is not set"
+  error="true"
+fi
+
+if [ -z "$AWS_SECRET_ACCESS_KEY" ] ; then
+  echo "Environment variable AWS_SECRET_ACCESS_KEY is not set"
+  error="true"
+fi
+
+if [ "x$error" == "xtrue" ] ; then
+  echo "Please set all of the above environment variables first. Exiting..."
+  exit
+fi
+
+echo "Checking for available command line tools:"
+
+check_for_command() {
+  echo -n "  $1"
+  if [ -z "`which $1`" ]; then
+    echo " NO"
+    error="true"
+  else
+    echo " ok"
+  fi
+}
+
+error=""
+check_for_command "createrepo"
+check_for_command "s3cmd"
+check_for_command "apt-ftparchive"
+check_for_command "gpg"
+check_for_command "expect" # needed for the RPM plugin
+
+if [ "x$error" == "xtrue" ] ; then
+  echo "Please install all of the above tools first. Exiting..." 
+  exit
+fi
+
+###################
+## setup
+###################
+tempdir=`mktemp -d /tmp/elasticsearch-repo.XXXX`
+mkdir -p $tempdir
+
+# create custom s3cmd conf, in case s3cmd does not support --aws-secret-key like on ubuntu
+( cat <<EOF
+[default]
+access_key = $AWS_ACCESS_KEY_ID
+secret_key = $AWS_SECRET_ACCESS_KEY
+EOF
+) > $tempdir/.s3cmd
+s3cmd="s3cmd -c $tempdir/.s3cmd"
+
+###################
+## RPM
+###################
+
+centosdir=$tempdir/repository/elasticsearch/$version/centos
+mkdir -p $centosdir
+
+echo "RPM: Syncing repository for version $version into $centosdir"
+$s3cmd sync s3://$S3_BUCKET_SYNC_FROM/elasticsearch/$version/centos/ $centosdir
+
+rpm=target/rpm/elasticsearch/RPMS/noarch/elasticsearch*.rpm
+echo "RPM: Copying $rpm into $centosdir"
+cp $rpm $centosdir
+
+echo "RPM: Running createrepo in $centosdir"
+createrepo --update $centosdir
+
+echo "RPM: Resigning repomd.xml"
+rm -f $centosdir/repodata/repomd.xml.asc
+gpg $GPG_HOMEDIR --passphrase "$GPG_PASSPHRASE" -a -b -o $centosdir/repodata/repomd.xml.asc $centosdir/repodata/repomd.xml
+
+echo "RPM: Syncing back repository for $version into S3 bucket $S3_BUCKET_SYNC_TO"
+$s3cmd sync -P $centosdir/ s3://$S3_BUCKET_SYNC_TO/elasticsearch/$version/centos/
+
+###################
+## DEB
+###################
+
+deb=target/releases/elasticsearch*.deb
+
+echo "DEB: Creating repository directory structure"
+
+if [ -z $tempdir ] ; then
+  echo "DEB: Could not create tempdir directory name, exiting"
+  exit
+fi
+
+debbasedir=$tempdir/repository/elasticsearch/$version/debian
+mkdir -p $debbasedir
+
+
+echo "DEB: Syncing debian repository of version $version to $debbasedir"
+# sync all former versions into directory
+$s3cmd sync s3://$S3_BUCKET_SYNC_FROM/elasticsearch/$version/debian/ $debbasedir
+
+# create directories in case of a new release so that syncing did not create this structure
+mkdir -p $debbasedir/dists/stable/main/binary-all
+mkdir -p $debbasedir/dists/stable/main/binary-i386
+mkdir -p $debbasedir/dists/stable/main/binary-amd64
+mkdir -p $debbasedir/.cache
+mkdir -p $debbasedir/pool/main
+
+# create the release configuration for apt-ftparchive
+( cat <<EOF
+APT::FTPArchive::Release::Origin "Elasticsearch";
+APT::FTPArchive::Release::Label "Elasticsearch";
+APT::FTPArchive::Release::Suite "stable";
+APT::FTPArchive::Release::Codename "stable";
+APT::FTPArchive::Release::Architectures "i386 amd64";
+APT::FTPArchive::Release::Components "main";
+APT::FTPArchive::Release::Description "Elasticsearch $version.x packages";
+EOF
+) > $tempdir/elasticsearch-$version-releases.conf
+
+# create packages file using apt-ftparchive
+mkdir -p $debbasedir/dists/stable/main/binary-all
+mkdir -p $debbasedir/pool/main/e/elasticsearch
+
+echo "DEB: Copying $deb to elasticsearch repo directory"
+cp $deb $debbasedir/pool/main/e/elasticsearch
+
+echo "DEB: Creating new Packages and Release files"
+cd $debbasedir
+apt-ftparchive packages pool > dists/stable/main/binary-all/Packages
+cat dists/stable/main/binary-all/Packages | gzip -9 > dists/stable/main/binary-all/Packages.gz
+cp dists/stable/main/binary-all/Packages* dists/stable/main/binary-i386/
+cp dists/stable/main/binary-all/Packages* dists/stable/main/binary-amd64/
+apt-ftparchive -c $tempdir/elasticsearch-$version-releases.conf release $debbasedir/dists/stable/ > $debbasedir/dists/stable/Release
+
+echo "DEB: Signing newly created release file at $debbasedir/dists/stable/Release.gpg"
+rm -f $debbasedir/dists/stable/Release.gpg
+gpg $GPG_HOMEDIR --passphrase "$GPG_PASSPHRASE" -a -b -o $debbasedir/dists/stable/Release.gpg $debbasedir/dists/stable/Release
+
+# upload to S3
+echo "DEB: Uploading to S3 bucket $S3_BUCKET_SYNC_TO"
+$s3cmd sync -P $debbasedir/ s3://$S3_BUCKET_SYNC_TO/elasticsearch/$version/debian/
+
+# back to original dir
+cd -
+
+# delete directories unless configured otherwise
+if [ -z $KEEP_DIRECTORIES ] ; then
+  echo "Done! Deleting repository directories at $tempdir"
+  rm -fr $tempdir
+else
+  echo "Done! 
Keeping repository directories at $tempdir" +fi diff --git a/dev-tools/forbidden/core-signatures.txt b/dev-tools/forbidden/core-signatures.txt index 2a662a60974..acd66985081 100644 --- a/dev-tools/forbidden/core-signatures.txt +++ b/dev-tools/forbidden/core-signatures.txt @@ -39,9 +39,6 @@ org.apache.lucene.index.IndexReader#decRef() org.apache.lucene.index.IndexReader#incRef() org.apache.lucene.index.IndexReader#tryIncRef() -@defaultMessage QueryWrapperFilter is cacheable by default - use Queries#wrap instead -org.apache.lucene.search.QueryWrapperFilter#(org.apache.lucene.search.Query) - @defaultMessage Pass the precision step from the mappings explicitly instead org.apache.lucene.search.NumericRangeQuery#newDoubleRange(java.lang.String,java.lang.Double,java.lang.Double,boolean,boolean) org.apache.lucene.search.NumericRangeQuery#newFloatRange(java.lang.String,java.lang.Float,java.lang.Float,boolean,boolean) diff --git a/docs/README.md b/docs/README.asciidoc similarity index 100% rename from docs/README.md rename to docs/README.asciidoc diff --git a/docs/community/clients.asciidoc b/docs/community/clients.asciidoc index 5455440e114..e0205816ca0 100644 --- a/docs/community/clients.asciidoc +++ b/docs/community/clients.asciidoc @@ -50,13 +50,13 @@ See the {client}/ruby-api/current/index.html[official Elasticsearch Ruby client] * https://github.com/ddnexus/flex[Flex]: Ruby Client. - + * https://github.com/printercu/elastics-rb[elastics]: Tiny client with built-in zero-downtime migrations and ActiveRecord integration. - + * https://github.com/toptal/chewy[chewy]: - Chewy is ODM and wrapper for official elasticsearch client - + Chewy is ODM and wrapper for official elasticsearch client + * https://github.com/ankane/searchkick[Searchkick]: Intelligent search made easy @@ -82,7 +82,7 @@ See the {client}/php-api/current/index.html[official Elasticsearch PHP client]. * https://github.com/searchbox-io/Jest[Jest]: Java Rest client. 
-* There is of course the http://www.elasticsearch.org/guide/en/elasticsearch/client/java-api/current/index.html[native ES Java client]
+* There is of course the {client}/java-api/current/index.html[native ES Java client]

 [[community-javascript]]
 === JavaScript
diff --git a/docs/community/index.asciidoc b/docs/community/index.asciidoc
index 88135d89563..48b2f2ad8c1 100644
--- a/docs/community/index.asciidoc
+++ b/docs/community/index.asciidoc
@@ -1,6 +1,6 @@
 = Community Supported Clients

-:client: http://www.elasticsearch.org/guide/en/elasticsearch/client
+:client: http://www.elastic.co/guide/en/elasticsearch/client

 include::clients.asciidoc[]
diff --git a/docs/groovy-api/index.asciidoc b/docs/groovy-api/index.asciidoc
index 5ab4bf61318..5e06cd1f2f4 100644
--- a/docs/groovy-api/index.asciidoc
+++ b/docs/groovy-api/index.asciidoc
@@ -1,6 +1,6 @@
 = Groovy API
-:ref: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current
-:java: http://www.elasticsearch.org/guide/en/elasticsearch/client/java-api/current
+:ref: http://www.elastic.co/guide/en/elasticsearch/reference/current
+:java: http://www.elastic.co/guide/en/elasticsearch/client/java-api/current

 [preface]
 == Preface
diff --git a/docs/java-api/aggregations/metrics/extendedstats-aggregation.asciidoc b/docs/java-api/aggregations/metrics/extendedstats-aggregation.asciidoc
index f19b69ff89f..20d8db036d9 100644
--- a/docs/java-api/aggregations/metrics/extendedstats-aggregation.asciidoc
+++ b/docs/java-api/aggregations/metrics/extendedstats-aggregation.asciidoc
@@ -31,7 +31,7 @@ import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStat
 [source,java]
 --------------------------------------------------
 // sr is here your SearchResponse object
-Stats agg = sr.getAggregations().get("agg");
+ExtendedStats agg = sr.getAggregations().get("agg");
 double min = agg.getMin();
 double max = agg.getMax();
 double avg = agg.getAvg();
diff --git a/docs/java-api/bulk.asciidoc b/docs/java-api/bulk.asciidoc
index 9ac61f47f30..96b0b2eb6dc 100644
--- a/docs/java-api/bulk.asciidoc
+++ b/docs/java-api/bulk.asciidoc
@@ -99,3 +99,22 @@ By default, `BulkProcessor`:
 * does not set flushInterval
 * sets concurrentRequests to 1

+When all documents are loaded into the `BulkProcessor` it can be closed by using the `awaitClose` or `close` methods:
+
+[source,java]
+--------------------------------------------------
+bulkProcessor.awaitClose(10, TimeUnit.MINUTES);
+--------------------------------------------------
+
+or
+
+[source,java]
+--------------------------------------------------
+bulkProcessor.close();
+--------------------------------------------------
+
+Both methods flush any remaining documents and disable all other scheduled flushes if they were scheduled by setting
+`flushInterval`. If concurrent requests were enabled, the `awaitClose` method waits for up to the specified timeout for
+all bulk requests to complete and then returns `true`; if the specified waiting time elapses before all bulk requests complete,
+`false` is returned. The `close` method doesn't wait for any remaining bulk requests to complete and exits immediately.
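+
+For example, a typical shutdown sequence might look like this (a minimal sketch; it assumes
+the processor was built as shown above and omits handling of the `InterruptedException`
+that `awaitClose` can throw):
+
+[source,java]
+--------------------------------------------------
+// flush anything still buffered and wait for in-flight bulk requests to finish
+boolean terminated = bulkProcessor.awaitClose(10, TimeUnit.MINUTES);
+if (!terminated) {
+    // the timeout elapsed before all bulk requests completed
+}
+--------------------------------------------------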
+ diff --git a/docs/java-api/index.asciidoc b/docs/java-api/index.asciidoc index e626fcad7bd..6145e2918d8 100644 --- a/docs/java-api/index.asciidoc +++ b/docs/java-api/index.asciidoc @@ -1,6 +1,6 @@ [[java-api]] = Java API -:ref: http://www.elasticsearch.org/guide/en/elasticsearch/reference/current +:ref: http://www.elastic.co/guide/en/elasticsearch/reference/current [preface] == Preface diff --git a/docs/java-api/query-dsl-queries.asciidoc b/docs/java-api/query-dsl-queries.asciidoc index afded7d9785..92e0982d4e5 100644 --- a/docs/java-api/query-dsl-queries.asciidoc +++ b/docs/java-api/query-dsl-queries.asciidoc @@ -234,7 +234,7 @@ QueryBuilder qb = matchAllQuery(); [[mlt]] -=== More Like This (Field) Query (mlt and mlt_field) +=== More Like This Query (mlt) See: * {ref}/query-dsl-mlt-query.html[More Like This Query] diff --git a/docs/javascript/index.asciidoc b/docs/javascript/index.asciidoc deleted file mode 100644 index 67a2a73a2e6..00000000000 --- a/docs/javascript/index.asciidoc +++ /dev/null @@ -1,138 +0,0 @@ -= elasticsearch-js - -== Overview - -Official low-level client for Elasticsearch. Its goal is to provide common -ground for all Elasticsearch-related code in JavaScript; because of this it tries -to be opinion-free and very extendable. - -The full documentation is available at http://elasticsearch.github.io/elasticsearch-js - - -=== Getting the Node.js module - -To install the module into an existing Node.js project use npm: - -[source,sh] ------------------------------------- -npm install elasticsearch ------------------------------------- - -=== Getting the browser client - -For a browser-based projects, builds for modern browsers are available http://elasticsearch.github.io/elasticsearch-js#browser-builds[here]. Download one of the archives and extract it, inside you'll find three files, pick the one that best matches your environment: - - * elasticsearch.jquery.js - for projects that already use jQuery - * elasticsearch.angular.js - for Angular projects - * elasticsearch.js - generic build for all other projects - -Each of the library specific builds tie into the AJAX and Promise creation facilities provided by their respective libraries. This is an example of how Elasticsearch.js can be extended to provide a more opinionated approach when appropriate. - -=== Setting up the client - -Now you are ready to get busy! First thing you'll need to do is create an instance of `elasticsearch.Client`. Here are several examples of configuration parameters you can use when creating that instance. For a full list of configuration options see http://elasticsearch.github.io/elasticsearch-js/index.html#configuration[the configuration docs]. 
- -[source,javascript] ------------------------------------- -var elasticsearch = require('elasticsearch'); - -// Connect to localhost:9200 and use the default settings -var client = new elasticsearch.Client(); - -// Connect the client to two nodes, requests will be -// load-balanced between them using round-robin -var client = elasticsearch.Client({ - hosts: [ - 'elasticsearch1:9200', - 'elasticsearch2:9200' - ] -}); - -// Connect to the this host's cluster, sniff -// for the rest of the cluster right away, and -// again every 5 minutes -var client = elasticsearch.Client({ - host: 'elasticsearch1:9200', - sniffOnStart: true, - sniffInterval: 300000 -}); - -// Connect to this host using https, basic auth, -// a path prefix, and static query string values -var client = new elasticsearch.Client({ - host: 'https://user:password@elasticsearch1/search?app=blog' -}); ------------------------------------- - - -=== Setting up the client in the browser - -The params accepted by the `Client` constructor are the same in the browser versions of the client, but how you access the Client constructor is different based on the build you are using. Below is an example of instantiating a client in each build. - -[source,javascript] ------------------------------------- -// elasticsearch.js adds the elasticsearch namespace to the window -var client = elasticsearch.Client({ ... }); - -// elasticsearch.jquery.js adds the es namespace to the jQuery object -var client = jQuery.es.Client({ ... }); - -// elasticsearch.angular.js creates an elasticsearch -// module, which provides an esFactory -var app = angular.module('app', ['elasticsearch']); -app.service('es', function (esFactory) { - return esFactory({ ... }); -}); ------------------------------------- - -=== Using the client instance to make API calls. - -Once you create the client, making API calls is simple. - -[source,javascript] ------------------------------------- -// get the current status of the entire cluster. -// Note: params are always optional, you can just send a callback -client.cluster.health(function (err, resp) { - if (err) { - console.error(err.message); - } else { - console.dir(resp); - } -}); - -// index a document -client.index({ - index: 'blog', - type: 'post', - id: 1, - body: { - title: 'JavaScript Everywhere!', - content: 'It all started when...', - date: '2013-12-17' - } -}, function (err, resp) { - // ... -}); - -// search for documents (and also promises!!) -client.search({ - index: 'users', - size: 50, - body: { - query: { - match: { - profile: 'elasticsearch' - } - } - } -}).then(function (resp) { - var hits = resp.body.hits; -}); ------------------------------------- - -== Copyright and License - -This software is Copyright (c) 2013-2015 by Elasticsearch BV. - -This is free software, licensed under The Apache License Version 2.0. 
diff --git a/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc index 4091296a76e..7c919b56b98 100644 --- a/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/pattern-capture-tokenfilter.asciidoc @@ -82,7 +82,7 @@ curl -XPUT localhost:9200/test/ -d ' "type" : "pattern_capture", "preserve_original" : 1, "patterns" : [ - "(\\w+)", + "([^@]+)", "(\\p{L}+)", "(\\d+)", "@(.+)" @@ -108,9 +108,10 @@ When the above analyzer is used on an email address like: john-smith_123@foo-bar.com -------------------------------------------------- -it would produce the following tokens: [ `john-smith_123`, -`foo-bar.com`, `john`, `smith_123`, `smith`, `123`, `foo`, -`foo-bar.com`, `bar`, `com` ] +it would produce the following tokens: + + john-smith_123@foo-bar.com, john-smith_123, + john, smith, 123, foo-bar.com, foo, bar, com Multiple patterns are required to allow overlapping captures, but also means that patterns are less dense and easier to understand. diff --git a/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc index 9ce81e1ac9f..edb3f3b5590 100644 --- a/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/word-delimiter-tokenfilter.asciidoc @@ -16,27 +16,27 @@ ignored: "//hello---there, 'dude'" -> "hello", "there", "dude" Parameters include: -`generate_word_parts`:: +`generate_word_parts`:: If `true` causes parts of words to be generated: "PowerShot" => "Power" "Shot". Defaults to `true`. -`generate_number_parts`:: +`generate_number_parts`:: If `true` causes number subwords to be generated: "500-42" => "500" "42". Defaults to `true`. -`catenate_words`:: +`catenate_words`:: If `true` causes maximum runs of word parts to be catenated: "wi-fi" => "wifi". Defaults to `false`. -`catenate_numbers`:: +`catenate_numbers`:: If `true` causes maximum runs of number parts to be catenated: "500-42" => "50042". Defaults to `false`. -`catenate_all`:: +`catenate_all`:: If `true` causes all subword parts to be catenated: "wi-fi-4000" => "wifi4000". Defaults to `false`. -`split_on_case_change`:: +`split_on_case_change`:: If `true` causes "PowerShot" to be two tokens; ("Power-Shot" remains two parts regards). Defaults to `true`. @@ -44,29 +44,29 @@ Parameters include: If `true` includes original words in subwords: "500-42" => "500-42" "500" "42". Defaults to `false`. -`split_on_numerics`:: +`split_on_numerics`:: If `true` causes "j2se" to be three tokens; "j" "2" "se". Defaults to `true`. -`stem_english_possessive`:: +`stem_english_possessive`:: If `true` causes trailing "'s" to be removed for each subword: "O'Neil's" => "O", "Neil". Defaults to `true`. Advance settings include: -`protected_words`:: +`protected_words`:: A list of protected words from being delimiter. Either an array, or also can set `protected_words_path` which resolved to a file configured with protected words (one on each line). Automatically resolves to `config/` based location if exists. -`type_table`:: +`type_table`:: A custom type mapping table, for example (when configured using `type_table_path`): [source,js] -------------------------------------------------- - # Map the $, %, '.', and ',' characters to DIGIT + # Map the $, %, '.', and ',' characters to DIGIT # This might be useful for financial data. 
$ => DIGIT % => DIGIT @@ -78,3 +78,9 @@ Advance settings include: # see http://en.wikipedia.org/wiki/Zero-width_joiner \\u200D => ALPHANUM -------------------------------------------------- + +NOTE: Using a tokenizer like the `standard` tokenizer may interfere with +the `catenate_*` and `preserve_original` parameters, as the original +string may already have lost punctuation during tokenization. Instead, +you may want to use the `whitespace` tokenizer. + diff --git a/docs/reference/cluster.asciidoc b/docs/reference/cluster.asciidoc index 088de8d2ccf..4c823119daf 100644 --- a/docs/reference/cluster.asciidoc +++ b/docs/reference/cluster.asciidoc @@ -46,5 +46,3 @@ include::cluster/nodes-stats.asciidoc[] include::cluster/nodes-info.asciidoc[] include::cluster/nodes-hot-threads.asciidoc[] - -include::cluster/nodes-shutdown.asciidoc[] diff --git a/docs/reference/cluster/nodes-shutdown.asciidoc b/docs/reference/cluster/nodes-shutdown.asciidoc deleted file mode 100644 index 65030a384c8..00000000000 --- a/docs/reference/cluster/nodes-shutdown.asciidoc +++ /dev/null @@ -1,57 +0,0 @@ -[[cluster-nodes-shutdown]] -== Nodes Shutdown - -The nodes shutdown API allows to shutdown one or more (or all) nodes in -the cluster. Here is an example of shutting the `_local` node the -request is directed to: - -[source,js] --------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/_cluster/nodes/_local/_shutdown' --------------------------------------------------- - -Specific node(s) can be shutdown as well using their respective node ids -(or other selective options as explained -<> .): - -[source,js] --------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/_cluster/nodes/nodeId1,nodeId2/_shutdown' --------------------------------------------------- - -The master (of the cluster) can also be shutdown using: - -[source,js] --------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/_cluster/nodes/_master/_shutdown' --------------------------------------------------- - -Finally, all nodes can be shutdown using one of the options below: - -[source,js] --------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/_shutdown' - -$ curl -XPOST 'http://localhost:9200/_cluster/nodes/_shutdown' - -$ curl -XPOST 'http://localhost:9200/_cluster/nodes/_all/_shutdown' --------------------------------------------------- - -[float] -[[delay]] -=== Delay - -By default, the shutdown will be executed after a 1 second delay (`1s`). -The delay can be customized by setting the `delay` parameter in a time -value format. For example: - -[source,js] --------------------------------------------------- -$ curl -XPOST 'http://localhost:9200/_cluster/nodes/_local/_shutdown?delay=10s' --------------------------------------------------- - -[float] -=== Disable Shutdown - -The shutdown API can be disabled by setting `action.disable_shutdown` in -the node configuration. diff --git a/docs/reference/cluster/update-settings.asciidoc b/docs/reference/cluster/update-settings.asciidoc index 438b04d4094..a0f7bbaa976 100644 --- a/docs/reference/cluster/update-settings.asciidoc +++ b/docs/reference/cluster/update-settings.asciidoc @@ -153,9 +153,6 @@ due to forced awareness or allocation filtering. 
`indices.cache.filter.size`:: See <> -`indices.cache.filter.expire` (time):: - See <> - [float] ==== TTL interval diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index d5e9adbbf8b..358c5f09ef8 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -89,7 +89,7 @@ The number of shards and replicas can be defined per index at the time the index By default, each index in Elasticsearch is allocated 5 primary shards and 1 replica which means that if you have at least two nodes in your cluster, your index will have 5 primary shards and another 5 replica shards (1 complete replica) for a total of 10 shards per index. -NOTE: Each Elasticsearch shard is a Lucene index. There is a maximum number of documents you can have in a single Lucene index. As of https://issues.apache.org/jira/browse/LUCENE-5843[`LUCENE-5843`], the limit is `2,147,483,519` (= Integer.MAX_VALUE - 128) documents. +NOTE: Each Elasticsearch shard is a Lucene index. There is a maximum number of documents you can have in a single Lucene index. As of https://issues.apache.org/jira/browse/LUCENE-5843[`LUCENE-5843`], the limit is `2,147,483,519` (= Integer.MAX_VALUE - 128) documents. You can monitor shard sizes using the <> api. With that out of the way, let's get started with the fun part... @@ -104,13 +104,13 @@ java -version echo $JAVA_HOME -------------------------------------------------- -Once we have Java set up, we can then download and run Elasticsearch. The binaries are available from http://www.elasticsearch.org/download[`www.elasticsearch.org/download`] along with all the releases that have been made in the past. For each release, you have a choice among a `zip` or `tar` archive, or a `DEB` or `RPM` package. For simplicity, let's use the tar file. +Once we have Java set up, we can then download and run Elasticsearch. The binaries are available from http://www.elastic.co/downloads[`www.elastic.co/downloads`] along with all the releases that have been made in the past. For each release, you have a choice among a `zip` or `tar` archive, or a `DEB` or `RPM` package. For simplicity, let's use the tar file. Let's download the Elasticsearch {version} tar as follows (Windows users should download the zip package): ["source","sh",subs="attributes,callouts"] -------------------------------------------------- -curl -L -O https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-{version}.tar.gz +curl -L -O https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-{version}.tar.gz -------------------------------------------------- Then extract it as follows (Windows users should unzip the zip package): @@ -868,7 +868,7 @@ In the previous section, we skipped over a little detail called the document sco All queries in Elasticsearch trigger computation of the relevance scores. In cases where we do not need the relevance scores, Elasticsearch provides another query capability in the form of <. 
Filters are similar in concept to queries except that they are optimized for much faster execution speeds for two primary reasons: * Filters do not score so they are faster to execute than queries -* Filters can be http://www.elasticsearch.org/blog/all-about-elasticsearch-filter-bitsets/[cached in memory] allowing repeated search executions to be significantly faster than queries +* Filters can be http://www.elastic.co/blog/all-about-elasticsearch-filter-bitsets/[cached in memory] allowing repeated search executions to be significantly faster than queries To understand filters, let's first introduce the <>, which allows you to combine a query (like `match_all`, `match`, `bool`, etc.) together with a filter. As an example, let's introduce the <>, which allows us to filter documents by a range of values. This is generally used for numeric or date filtering. diff --git a/docs/reference/images/reducers_movavg/double_0.2beta.png b/docs/reference/images/reducers_movavg/double_0.2beta.png new file mode 100644 index 00000000000..64499b98342 Binary files /dev/null and b/docs/reference/images/reducers_movavg/double_0.2beta.png differ diff --git a/docs/reference/images/reducers_movavg/double_0.7beta.png b/docs/reference/images/reducers_movavg/double_0.7beta.png new file mode 100644 index 00000000000..b9f530227d9 Binary files /dev/null and b/docs/reference/images/reducers_movavg/double_0.7beta.png differ diff --git a/docs/reference/images/reducers_movavg/double_prediction_global.png b/docs/reference/images/reducers_movavg/double_prediction_global.png new file mode 100644 index 00000000000..faee6d22bc2 Binary files /dev/null and b/docs/reference/images/reducers_movavg/double_prediction_global.png differ diff --git a/docs/reference/images/reducers_movavg/double_prediction_local.png b/docs/reference/images/reducers_movavg/double_prediction_local.png new file mode 100644 index 00000000000..930a5cfde9b Binary files /dev/null and b/docs/reference/images/reducers_movavg/double_prediction_local.png differ diff --git a/docs/reference/images/reducers_movavg/linear_100window.png b/docs/reference/images/reducers_movavg/linear_100window.png new file mode 100644 index 00000000000..3a4d51ae956 Binary files /dev/null and b/docs/reference/images/reducers_movavg/linear_100window.png differ diff --git a/docs/reference/images/reducers_movavg/linear_10window.png b/docs/reference/images/reducers_movavg/linear_10window.png new file mode 100644 index 00000000000..1407ded8791 Binary files /dev/null and b/docs/reference/images/reducers_movavg/linear_10window.png differ diff --git a/docs/reference/images/reducers_movavg/movavg_100window.png b/docs/reference/images/reducers_movavg/movavg_100window.png new file mode 100644 index 00000000000..45094ec2681 Binary files /dev/null and b/docs/reference/images/reducers_movavg/movavg_100window.png differ diff --git a/docs/reference/images/reducers_movavg/movavg_10window.png b/docs/reference/images/reducers_movavg/movavg_10window.png new file mode 100644 index 00000000000..1e9f543385f Binary files /dev/null and b/docs/reference/images/reducers_movavg/movavg_10window.png differ diff --git a/docs/reference/images/reducers_movavg/simple_prediction.png b/docs/reference/images/reducers_movavg/simple_prediction.png new file mode 100644 index 00000000000..d74724e1546 Binary files /dev/null and b/docs/reference/images/reducers_movavg/simple_prediction.png differ diff --git a/docs/reference/images/reducers_movavg/single_0.2alpha.png b/docs/reference/images/reducers_movavg/single_0.2alpha.png new 
file mode 100644 index 00000000000..d96cf771743 Binary files /dev/null and b/docs/reference/images/reducers_movavg/single_0.2alpha.png differ diff --git a/docs/reference/images/reducers_movavg/single_0.7alpha.png b/docs/reference/images/reducers_movavg/single_0.7alpha.png new file mode 100644 index 00000000000..bf7bdd1752e Binary files /dev/null and b/docs/reference/images/reducers_movavg/single_0.7alpha.png differ diff --git a/docs/reference/index-modules.asciidoc b/docs/reference/index-modules.asciidoc index eb85f8d231e..f74eda35bed 100644 --- a/docs/reference/index-modules.asciidoc +++ b/docs/reference/index-modules.asciidoc @@ -15,29 +15,6 @@ all the relevant modules settings can be provided when creating an index There are specific index level settings that are not associated with any specific module. These include: -[[index-compound-format]]`index.compound_format`:: - - experimental[] - Should the compound file format be used (boolean setting). - The compound format was created to reduce the number of open - file handles when using file based storage. However, by default it is set - to `false` as the non-compound format gives better performance. It is important - that OS is configured to give Elasticsearch ``enough'' file handles. - See <>. -+ -Alternatively, `compound_format` can be set to a number between `0` and -`1`, where `0` means `false`, `1` means `true` and a number inbetween -represents a percentage: if the merged segment is less than this -percentage of the total index, then it is written in compound format, -otherwise it is written in non-compound format. - -[[index-compound-on-flush]]`index.compound_on_flush`:: - - experimental[] - Should a new segment (create by indexing, not by merging) be written - in compound format or non-compound format? Defaults to `true`. - This is a dynamic setting. - `index.refresh_interval`:: A time setting controlling how often the refresh operation will be executed. Defaults to `1s`. Can be set to `-1` @@ -59,7 +36,7 @@ otherwise it is written in non-compound format. When `checksum`, check for physical corruption. When `true`, check for both physical and logical corruption. This is much more expensive in terms of CPU and memory usage. - When `fix`, check for both physical and logical corruption, and segments + When `fix`, check for both physical and logical corruption, and segments that were reported as corrupted will be automatically removed. Default value is `false`, which performs no checks. diff --git a/docs/reference/index-modules/allocation.asciidoc b/docs/reference/index-modules/allocation.asciidoc index 9ba0821b3e4..910858f7fcd 100644 --- a/docs/reference/index-modules/allocation.asciidoc +++ b/docs/reference/index-modules/allocation.asciidoc @@ -143,6 +143,21 @@ settings API. By default, Elasticsearch will retrieve information about the disk usage of the nodes every 30 seconds. This can also be changed by setting the `cluster.info.update.interval` setting. 
+An example of updating the low watermark to no more than 80% of the disk size, a +high watermark of at least 50 gigabytes free, and updating the information about +the cluster every minute: + +[source,js] +-------------------------------------------------- +curl -XPUT localhost:9200/_cluster/settings -d '{ + "transient" : { + "cluster.routing.allocation.disk.watermark.low" : "80%", + "cluster.routing.allocation.disk.watermark.high" : "50gb", + "cluster.info.update.interval" : "1m" + } +}' +-------------------------------------------------- + By default, Elasticsearch will take into account shards that are currently being relocated to the target node when computing a node's disk usage. This can be changed by setting the `cluster.routing.allocation.disk.include_relocations` diff --git a/docs/reference/index-modules/mapper.asciidoc b/docs/reference/index-modules/mapper.asciidoc index 2bbca6c095d..baca199efae 100644 --- a/docs/reference/index-modules/mapper.asciidoc +++ b/docs/reference/index-modules/mapper.asciidoc @@ -32,7 +32,7 @@ mapping specified in the <> or `_default_` mapping. The default mapping definition is a plain mapping definition that is -embedded within ElasticSearch: +embedded within Elasticsearch: [source,js] -------------------------------------------------- @@ -46,11 +46,8 @@ Pretty short, isn't it? Basically, everything is `_default_`ed, including the dynamic nature of the root object mapping which allows new fields to be added automatically. -The built-in default mapping definition can be overridden in several ways. A -`_default_` mapping can be specified when creating a new index, or the global -`_default_` mapping (for all indices) can be configured by creating a file -called `config/default-mapping.json`. (This location can be changed with -the `index.mapper.default_mapping_location` setting.) +The default mapping can be overridden by specifying the `_default_` type when +creating a new index. Dynamic creation of mappings for unmapped types can be completely disabled by setting `index.mapper.dynamic` to `false`. diff --git a/docs/reference/index-modules/merge.asciidoc b/docs/reference/index-modules/merge.asciidoc index 3ad2dd5c0a8..036d2b8d16f 100644 --- a/docs/reference/index-modules/merge.asciidoc +++ b/docs/reference/index-modules/merge.asciidoc @@ -1,7 +1,7 @@ [[index-modules-merge]] == Merge -experimental[] +experimental[All of the settings exposed in the `merge` module are expert only and may be removed in the future] A shard in elasticsearch is a Lucene index, and a Lucene index is broken down into segments. Segments are internal storage elements in the index @@ -72,12 +72,6 @@ This policy has the following settings: Higher values favor selecting merges that reclaim deletions. A value of `0.0` means deletions don't impact merge selection. Defaults to `2.0`. -`index.compound_format`:: - - Should the index be stored in compound format or not. Defaults to `false`. - See <> in - <>. - For normal merging, this policy first computes a "budget" of how many segments are allowed to be in the index. 
If the index is over-budget, then the policy sorts segments by decreasing size (proportionally considering percent diff --git a/docs/reference/index-modules/store.asciidoc b/docs/reference/index-modules/store.asciidoc index b34536db811..12fcf0c3509 100644 --- a/docs/reference/index-modules/store.asciidoc +++ b/docs/reference/index-modules/store.asciidoc @@ -1,8 +1,6 @@ [[index-modules-store]] == Store -experimental[] - The store module allows you to control how index data is stored. The index can either be stored in-memory (no persistence) or on-disk @@ -20,6 +18,7 @@ heap space* using the "Memory" (see below) storage type. It translates to the fact that there is no need for extra large JVM heaps (with their own consequences) for storing the index in memory. +experimental[All of the settings exposed in the `store` module are expert only and may be removed in the future] [float] [[file-system]] @@ -28,7 +27,7 @@ own consequences) for storing the index in memory. File system based storage is the default storage used. There are different implementations or _storage types_. The best one for the operating environment will be automatically chosen: `mmapfs` on -Windows 64bit, `simplefs` on Windows 32bit, and `default` +Windows 64bit, `simplefs` on Windows 32bit, and `default` (hybrid `niofs` and `mmapfs`) for the rest. This can be overridden for all indices by adding this to the diff --git a/docs/reference/index.asciidoc b/docs/reference/index.asciidoc index 3288cad5855..1e63d18a4d2 100644 --- a/docs/reference/index.asciidoc +++ b/docs/reference/index.asciidoc @@ -1,7 +1,7 @@ [[elasticsearch-reference]] = Reference -:version: 1.5.1 +:version: 1.5.2 :branch: 1.5 :jdk: 1.8.0_25 :defguide: https://www.elastic.co/guide/en/elasticsearch/guide/current diff --git a/docs/reference/indices/segments.asciidoc b/docs/reference/indices/segments.asciidoc index f432697f54d..f2d51068a64 100644 --- a/docs/reference/indices/segments.asciidoc +++ b/docs/reference/indices/segments.asciidoc @@ -78,7 +78,7 @@ compound:: Whether the segment is stored in a compound file. When true, this To add additional information that can be used for debugging, use the `verbose` flag. -NOTE: The format of additional verbose information is experimental and can change at any time. +experimental[The format of the additional verbose information is experimental and can change at any time] [source,js] -------------------------------------------------- @@ -108,7 +108,7 @@ Response: }, ... ] - + } ... } diff --git a/docs/reference/indices/update-settings.asciidoc b/docs/reference/indices/update-settings.asciidoc index 3810bfbdca0..d4888103eb2 100644 --- a/docs/reference/indices/update-settings.asciidoc +++ b/docs/reference/indices/update-settings.asciidoc @@ -12,7 +12,7 @@ of the request includes the updated settings, for example: { "index" : { "number_of_replicas" : 4 - } + } } -------------------------------------------------- @@ -25,7 +25,7 @@ curl -XPUT 'localhost:9200/my_index/_settings' -d ' { "index" : { "number_of_replicas" : 4 - } + } }' -------------------------------------------------- @@ -61,9 +61,6 @@ settings API: `index.refresh_interval`:: The async refresh interval of a shard. -`index.index_concurrency`:: - experimental[] Defaults to `8`. - `index.translog.flush_threshold_ops`:: When to flush based on operations. @@ -151,14 +148,6 @@ settings API: `index.translog.fs.type`:: experimental[] Either `simple` or `buffered` (default). -`index.compound_format`:: - experimental[] See <> in - <>. 
- -`index.compound_on_flush`:: - experimental[] See <> in - <>. - <>:: All the settings for slow log. diff --git a/docs/reference/mapping.asciidoc b/docs/reference/mapping.asciidoc index 945d5a49ffc..7e11fe658a2 100644 --- a/docs/reference/mapping.asciidoc +++ b/docs/reference/mapping.asciidoc @@ -71,8 +71,6 @@ include::mapping/date-format.asciidoc[] include::mapping/dynamic-mapping.asciidoc[] -include::mapping/conf-mappings.asciidoc[] - include::mapping/meta.asciidoc[] include::mapping/transform.asciidoc[] diff --git a/docs/reference/mapping/conf-mappings.asciidoc b/docs/reference/mapping/conf-mappings.asciidoc deleted file mode 100644 index e9bb3f91f93..00000000000 --- a/docs/reference/mapping/conf-mappings.asciidoc +++ /dev/null @@ -1,19 +0,0 @@ -[[mapping-conf-mappings]] -== Config Mappings - -Creating new mappings can be done using the -<> -API. When a document is indexed with no mapping associated with it in -the specific index, the -<> feature will kick in and automatically create mapping -definition for it. - -Mappings can also be provided on the node level, meaning that each index -created will automatically be started with all the mappings defined -within a certain location. - -Mappings can be defined within files called `[mapping_name].json` and be -placed either under `config/mappings/_default` location, or under -`config/mappings/[index_name]` (for mappings that should be associated -only with a specific index). diff --git a/docs/reference/mapping/dynamic-mapping.asciidoc b/docs/reference/mapping/dynamic-mapping.asciidoc index abcfbc650e1..91ecd6b0c2d 100644 --- a/docs/reference/mapping/dynamic-mapping.asciidoc +++ b/docs/reference/mapping/dynamic-mapping.asciidoc @@ -21,12 +21,8 @@ embedded within the distribution: -------------------------------------------------- Pretty short, isn't it? Basically, everything is defaulted, especially the -dynamic nature of the root object mapping. The default mapping -definition can be overridden in several manners. The simplest manner is -to simply define a file called `default-mapping.json` and to place it -under the `config` directory (which can be configured to exist in a -different location). It can also be explicitly set using the -`index.mapper.default_mapping_location` setting. +dynamic nature of the root object mapping. The default mapping can be +overridden by specifying the `_default_` type when creating a new index. The dynamic creation of mappings for unmapped types can be completely disabled by setting `index.mapper.dynamic` to `false`. diff --git a/docs/reference/mapping/fields/source-field.asciidoc b/docs/reference/mapping/fields/source-field.asciidoc index f09c192f57a..8f57613dbe1 100644 --- a/docs/reference/mapping/fields/source-field.asciidoc +++ b/docs/reference/mapping/fields/source-field.asciidoc @@ -20,22 +20,3 @@ example: } } -------------------------------------------------- - -[float] -[[include-exclude]] -==== Includes / Excludes - -Allow to specify paths in the source that would be included / excluded -when it's stored, supporting `*` as wildcard annotation. 
For example: - -[source,js] --------------------------------------------------- -{ - "my_type" : { - "_source" : { - "includes" : ["path1.*", "path2.*"], - "excludes" : ["path3.*"] - } - } -} --------------------------------------------------- diff --git a/docs/reference/mapping/types/array-type.asciidoc b/docs/reference/mapping/types/array-type.asciidoc index 3f3d832d66f..f2dc40ed094 100644 --- a/docs/reference/mapping/types/array-type.asciidoc +++ b/docs/reference/mapping/types/array-type.asciidoc @@ -67,8 +67,3 @@ the fact that the following JSON document is perfectly fine: } -------------------------------------------------- -Note also, that thanks to the fact that we used the `index_name` to use -the non plural form (`tag` instead of `tags`), we can actually refer to -the field using the `index_name` as well. For example, we can execute a -query using `tweet.tags:wow` or `tweet.tag:wow`. We could, of course, -name the field as `tag` and skip the `index_name` all together). diff --git a/docs/reference/mapping/types/core-types.asciidoc b/docs/reference/mapping/types/core-types.asciidoc index e650ded89ca..1f6dcc01cb5 100644 --- a/docs/reference/mapping/types/core-types.asciidoc +++ b/docs/reference/mapping/types/core-types.asciidoc @@ -426,6 +426,9 @@ and it can be retrieved from it). in `_source`, have `include_in_all` enabled, or `store` be set to `true` for this to be useful. +|`doc_values` |Set to `true` to store field values in a column-stride fashion. +Automatically set to `true` when the fielddata format is `doc_values`. + |`boost` |The boost value. Defaults to `1.0`. |`null_value` |When there is a (JSON) null value for the field, use the diff --git a/docs/reference/mapping/types/nested-type.asciidoc b/docs/reference/mapping/types/nested-type.asciidoc index e2845bddf93..21b1110faf3 100644 --- a/docs/reference/mapping/types/nested-type.asciidoc +++ b/docs/reference/mapping/types/nested-type.asciidoc @@ -139,6 +139,10 @@ Nested fields may contain other nested fields. The `include_in_parent` object refers to the direct parent of the field, while the `include_in_root` parameter refers only to the topmost ``root'' object or document. +NOTE: The `include_in_parent` and `include_in_root` options do not apply +to <>, which are only ever +indexed inside the nested document. + Nested docs will automatically use the root doc `_all` field only. .Internal Implementation diff --git a/docs/reference/mapping/types/root-object-type.asciidoc b/docs/reference/mapping/types/root-object-type.asciidoc index fe16264b73f..a8e8ea000cd 100644 --- a/docs/reference/mapping/types/root-object-type.asciidoc +++ b/docs/reference/mapping/types/root-object-type.asciidoc @@ -16,27 +16,6 @@ specifying the `tweet` type in the document itself: } -------------------------------------------------- -[float] -==== Index / Search Analyzers - -The root object allows to define type mapping level analyzers for index -and search that will be used with all different fields that do not -explicitly set analyzers on their own. Here is an example: - -[source,js] --------------------------------------------------- -{ - "tweet" : { - "analyzer" : "standard", - "search_analyzer" : "standard_with_synonyms" - } -} --------------------------------------------------- - -The above simply explicitly defines both the `analyzer` and -`search_analyzer` that will be used. If `search_analyzer` is not specified, -it defaults to the value of `analyzer`. 
- [float] ==== dynamic_date_formats diff --git a/docs/reference/migration/migrate_1_0.asciidoc b/docs/reference/migration/migrate_1_0.asciidoc index aca40b33efb..f6cfd4f92a9 100644 --- a/docs/reference/migration/migrate_1_0.asciidoc +++ b/docs/reference/migration/migrate_1_0.asciidoc @@ -362,7 +362,7 @@ in the query string. === Percolator The percolator has been redesigned and because of this the dedicated `_percolator` index is no longer used by the percolator, -but instead the percolator works with a dedicated `.percolator` type. Read the http://www.elasticsearch.org/blog/percolator-redesign-blog-post/[redesigned percolator] +but instead the percolator works with a dedicated `.percolator` type. Read the http://www.elastic.co/blog/percolator-redesign-blog-post[redesigned percolator] blog post for the reasons why the percolator has been redesigned. Elasticsearch will *not* delete the `_percolator` index when upgrading, only the percolate api will not use the queries diff --git a/docs/reference/migration/migrate_2_0.asciidoc b/docs/reference/migration/migrate_2_0.asciidoc index b20e1960fee..292bb633a29 100644 --- a/docs/reference/migration/migrate_2_0.asciidoc +++ b/docs/reference/migration/migrate_2_0.asciidoc @@ -139,6 +139,8 @@ equivalent to the former `pre_zone` option. Setting `time_zone` to a value like being applied in the specified time zone but In addition to this, also the `pre_zone_adjust_large_interval` is removed because we now always return dates and bucket keys in UTC. +Both the `histogram` and `date_histogram` aggregations now default `min_doc_count` to `0` instead of `1`. + `include`/`exclude` filtering on the `terms` aggregation now uses the same syntax as regexp queries instead of the Java syntax. While simple regexps should still work, more complex ones might need some rewriting. Also, the `flags` parameter is not supported anymore. @@ -270,7 +272,7 @@ to provide special features. They now have limited configuration options. * `_field_names` configuration is limited to disabling the field. * `_size` configuration is limited to enabling the field. -=== Boolean fields +==== Boolean fields Boolean fields used to have a string fielddata with `F` meaning `false` and `T` meaning `true`. They have been refactored to use numeric fielddata, with `0` @@ -302,6 +304,22 @@ the user-friendly representation of boolean fields: `false`/`true`: ] --------------- +==== Murmur3 Fields +Fields of type `murmur3` can no longer change the `doc_values` or `index` settings. +They are always stored with doc values, and not indexed. + +==== Source field configuration +The `_source` field no longer supports `includes` and `excludes` parameters. When +`_source` is enabled, the entire original source will be stored. + +==== Config based mappings +The ability to specify mappings in configuration files has been removed. To specify +default mappings that apply to multiple indices, use index templates. + +The following settings are no longer valid: + +* `index.mapper.default_mapping_location` +* `index.mapper.default_percolator_mapping_location` + === Codecs It is no longer possible to specify per-field postings and doc values formats @@ -341,6 +359,11 @@ Deprecated script parameters `id`, `file`, and `scriptField` have been removed from all scriptable APIs. `script_id`, `script_file` and `script` should be used in their place. +=== Groovy scripts sandbox + +The Groovy sandbox and related settings have been removed.
Groovy is now a +non-sandboxed scripting language, without any option to turn the sandbox on. + === Plugins making use of scripts Plugins that make use of scripts must register their own script context through @@ -377,6 +400,11 @@ be used separately to control whether `routing_nodes` should be returned. === Query DSL +Single-term queries on numeric fields now score in the same way as string fields, using IDF and, if enabled, norms. +Previously, term queries on numeric fields were deliberately prevented from using the usual Lucene scoring logic; this behaviour was undocumented and, to some, unexpected. +If scoring on numeric fields is undesirable for your query clauses, the fix is simple: wrap them in a `constant_score` query or use a `filter` expression instead. + + The `fuzzy_like_this` and `fuzzy_like_this_field` queries have been removed. The `limit` filter is deprecated and becomes a no-op. You can achieve similar @@ -389,3 +417,44 @@ favour or `bool`. The `execution` option of the `terms` filter is now deprecated and ignored if provided. + +The `_cache` and `_cache_key` parameters of filters are deprecated in the REST +layer and removed in the Java API. If they are specified they will be ignored. +Instead, filters are always used as their own cache key and Elasticsearch +decides by itself whether to cache a filter, based on how often it is used. + +=== Snapshot and Restore + +The obsolete parameters `expand_wildcards_open` and `expand_wildcards_close` are no longer +supported by the snapshot and restore operations. These parameters have been replaced by +a single `expand_wildcards` parameter. See <> for more. + +=== `_shutdown` API + +The `_shutdown` API has been removed without a replacement. Nodes should be managed via the +operating system and the provided start/stop scripts. + +=== Analyze API + +The Analyze API now returns `0` as the position of the first token instead of `1`. + +=== Multiple data.path striping + +Previously, if the `data.path` setting listed multiple data paths, then a +shard would be ``striped'' across all paths by writing a whole file to each +path in turn (in accordance with the `index.store.distributor` setting). The +result was that the files from a single segment in a shard could be spread +across multiple disks, and the failure of any one disk could corrupt multiple +shards. + +This striping is no longer supported. Instead, different shards may be +allocated to different paths, but all of the files in a single shard will be +written to the same path. + +If striping is detected while starting Elasticsearch 2.0.0 or later, all of +the files belonging to the same shard will be migrated to the same path. If +there is not enough disk space to complete this migration, the upgrade will be +cancelled and can only be resumed once enough disk space is made available. + +The `index.store.distributor` setting has also been removed.
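+For reference, listing multiple data paths itself remains supported and is
+still configured the same way in `elasticsearch.yml` (the paths below are only
+illustrative); only the on-disk layout of shards changes:
+
+[source,yaml]
+--------------------------------------------------
+path.data: /mnt/first_disk,/mnt/second_disk
+--------------------------------------------------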
diff --git a/docs/reference/modules/cluster.asciidoc b/docs/reference/modules/cluster.asciidoc index 12d4c7443fd..25a88b2eeeb 100644 --- a/docs/reference/modules/cluster.asciidoc +++ b/docs/reference/modules/cluster.asciidoc @@ -227,7 +227,7 @@ several attributes, for example: [source,js] -------------------------------------------------- curl -XPUT localhost:9200/test/_settings -d '{ - "index.routing.allocation.include.group1" : "xxx" + "index.routing.allocation.include.group1" : "xxx", "index.routing.allocation.include.group2" : "yyy", "index.routing.allocation.exclude.group3" : "zzz", "index.routing.allocation.require.group4" : "aaa" diff --git a/docs/reference/modules/http.asciidoc b/docs/reference/modules/http.asciidoc index 72f4993d139..9fb441a646c 100644 --- a/docs/reference/modules/http.asciidoc +++ b/docs/reference/modules/http.asciidoc @@ -42,6 +42,9 @@ to `100mb` |`http.max_initial_line_length` |The max length of an HTTP URL. Defaults to `4kb` +|`http.max_header_size` | The max size of allowed headers. Defaults to `8kB` + + |`http.compression` |Support for compression when possible (with Accept-Encoding). Defaults to `false`. diff --git a/docs/reference/modules/plugins.asciidoc b/docs/reference/modules/plugins.asciidoc index 25f01a4715e..c06fc9c6e57 100644 --- a/docs/reference/modules/plugins.asciidoc +++ b/docs/reference/modules/plugins.asciidoc @@ -26,7 +26,7 @@ plugin --install // ----------------------------------- The plugins will be -automatically downloaded in this case from `download.elasticsearch.org`, +automatically downloaded in this case from `download.elastic.co`, and in case they don't exist there, from maven (central and sonatype). Note that when the plugin is located in maven central or sonatype diff --git a/docs/reference/modules/scripting.asciidoc b/docs/reference/modules/scripting.asciidoc index c0ea9368c23..750802c4ec2 100644 --- a/docs/reference/modules/scripting.asciidoc +++ b/docs/reference/modules/scripting.asciidoc @@ -11,26 +11,11 @@ The scripting module uses by default http://groovy.codehaus.org/[groovy] scripting language with some extensions. Groovy is used since it is extremely fast and very simple to use. -.Groovy dynamic scripting disabled by default from v1.4.3 +.Groovy dynamic scripting off by default from v1.4.3 [IMPORTANT] =================================================== -Elasticsearch versions 1.3.0-1.3.7 and 1.4.0-1.4.2 have a vulnerability in the -Groovy scripting engine. The vulnerability allows an attacker to construct -Groovy scripts that escape the sandbox and execute shell commands as the user -running the Elasticsearch Java VM. - -If you are running a vulnerable version of Elasticsearch, you should either -upgrade to at least v1.3.8 or v1.4.3, or disable dynamic Groovy scripts by -adding this setting to the `config/elasticsearch.yml` file in all nodes in the -cluster: - -[source,yaml] ------------------------------------ -script.groovy.sandbox.enabled: false ------------------------------------ - -This will turn off the Groovy sandbox, thus preventing dynamic Groovy scripts +Groovy dynamic scripting is off by default, preventing dynamic Groovy scripts from being accepted as part of a request or retrieved from the special `.scripts` index. You will still be able to use Groovy scripts stored in files in the `config/scripts/` directory on every node. 
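+If you understand the risks and still need dynamic Groovy scripts, they can be
+turned back on in `config/elasticsearch.yml` on every node. A minimal sketch,
+assuming the `script.disable_dynamic` setting used by the 1.x series:
+
+[source,yaml]
+-----------------------------------
+script.disable_dynamic: false
+-----------------------------------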
@@ -69,7 +54,7 @@ GET /_search { "script_fields": { "my_field": { - "script_file": "my_test", + "script_file": "my_script", "params": { "my_var": 2 } @@ -351,39 +336,6 @@ The default scripting language (assuming no `lang` parameter is provided) is `groovy`. In order to change it, set the `script.default_lang` to the appropriate language. -[float] -=== Groovy Sandboxing - -Elasticsearch sandboxes Groovy scripts that are compiled and executed in order -to ensure they don't perform unwanted actions. There are a number of options -that can be used for configuring this sandbox: - -`script.groovy.sandbox.receiver_whitelist`:: - - Comma-separated list of string classes for objects that may have methods - invoked. - -`script.groovy.sandbox.package_whitelist`:: - - Comma-separated list of packages under which new objects may be constructed. - -`script.groovy.sandbox.class_whitelist`:: - - Comma-separated list of classes that are allowed to be constructed. - -`script.groovy.sandbox.method_blacklist`:: - - Comma-separated list of methods that are never allowed to be invoked, - regardless of target object. - -`script.groovy.sandbox.enabled`:: - - Flag to enable the sandbox (defaults to `false` meaning the sandbox is - disabled). - -When specifying whitelist or blacklist settings for the groovy sandbox, all -options replace the current whitelist, they are not additive. - [float] === Automatic Script Reloading @@ -424,10 +376,7 @@ automatically loaded. [float] === Lucene Expressions Scripts -[WARNING] -======================== -This feature is *experimental* and subject to change in future versions. -======================== +experimental[The Lucene expressions module is undergoing significant development and the exposed functionality is likely to change in the future] Lucene's expressions module provides a mechanism to compile a `javascript` expression to bytecode. This allows very fast execution, @@ -440,9 +389,23 @@ for details on what operators and functions are available. Variables in `expression` scripts are available to access: * Single valued document fields, e.g. `doc['myfield'].value` +* Single valued document fields can also be accessed without `.value` e.g. `doc['myfield']` * Parameters passed into the script, e.g. `mymodifier` * The current document's score, `_score` (only available when used in a `script_score`) +Variables in `expression` scripts that are of type `date` may use the following member methods: + +* getYear() +* getMonth() +* getDayOfMonth() +* getHourOfDay() +* getMinutes() +* getSeconds() + +The following example shows the difference in years between the `date` fields date0 and date1: + +`doc['date1'].getYear() - doc['date0'].getYear()` + There are a few limitations relative to other script languages: * Only numeric fields may be accessed diff --git a/docs/reference/query-dsl/filters.asciidoc b/docs/reference/query-dsl/filters.asciidoc index 0c78dd21934..59a4a06caec 100644 --- a/docs/reference/query-dsl/filters.asciidoc +++ b/docs/reference/query-dsl/filters.asciidoc @@ -10,85 +10,14 @@ As a general rule, filters should be used instead of queries: [[caching]] === Filters and Caching -Filters can be a great candidate for caching. Caching the result of a -filter does not require a lot of memory, and will cause other queries -executing against the same filter (same parameters) to be blazingly -fast. +Filters can be a great candidate for caching. Caching the document set that +a filter matches does not require much memory and can help improve +execution speed of queries. 
-However the cost of caching is not the same for all filters. For -instance some filters are already fast out of the box while caching could -add significant overhead, and some filters produce results that are already -cacheable so caching them is just a matter of putting the result in the -cache. - -The default caching policy, `_cache: auto`, tracks the 1000 most recently -used filters on a per-index basis and makes decisions based on their -frequency. - -[float] -==== Filters that read directly the index structure - -Some filters can directly read the index structure and potentially jump -over large sequences of documents that are not worth evaluating (for -instance when these documents do not match the query). Caching these -filters introduces overhead given that all documents that the filter -matches need to be consumed in order to be loaded into the cache. - -These filters, which include the <> and -<> filters, are only cached after they -appear 5 times or more in the history of the 1000 most recently used -filters. - -[float] -==== Filters that produce results that are already cacheable - -Some filters produce results that are already cacheable, and the difference -between caching and not caching them is the act of placing the result in -the cache or not. These filters, which include the -<>, -<>, and -<> filters, are by default cached after they -appear twice or more in the history of the most 1000 recently used filters. - -[float] -==== Computational filters - -Some filters need to run some computation in order to figure out whether -a given document matches a filter. These filters, which include the geo and -<> filters, but also the -<> and <> -filters when using the `fielddata` execution mode are never cached by default, -as it would require to evaluate the filter on all documents in your indices -while they can otherwise be only evaluated on documents that match the query. - -[float] -==== Compound filters - -The last type of filters are those working with other filters, and includes -the <>, -<>, -<> and -<> filters. - -There is no general rule about these filters. Depending on the filters that -they wrap, they will sometimes return a filter that dynamically evaluates the -sub filters and sometimes evaluate the sub filters eagerly in order to return -a result that is already cacheable, so depending on the case, these filters -will be cached after they appear 2+ or 5+ times in the history of the most -1000 recently used filters. - -[float] -==== Overriding the default behaviour - -All filters allow to set `_cache` element on them to explicitly control -caching. It accepts 3 values: `true` in order to cache the filter, `false` -to make sure that the filter will not be cached, and `auto`, which is the -default and will decide on whether to cache the filter based on the cost -to cache it and how often it has been used as explained above. - -Filters also allow to set `_cache_key` which will be used as the -caching key for that filter. This can be handy when using very large -filters (like a terms filter with many elements in it). +Elasticsearch decides to cache filters based on how often they are used. For +this reason you might occasionally see better performance by splitting +complex filters into a static part that Elasticsearch will cache and a dynamic +part which is less costly than the original filter.
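+For example, a date range relative to `now` can be split into a coarse part
+rounded down to a whole day, which changes rarely and is therefore a good
+caching candidate, and an exact part that only has to narrow down the already
+small cached set. A sketch, assuming a `timestamp` date field:
+
+[source,js]
+--------------------------------------------------
+{
+    "filtered" : {
+        "filter" : {
+            "bool" : {
+                "must" : [
+                    { "range" : { "timestamp" : { "gte" : "now-1h/d" }}}, <1>
+                    { "range" : { "timestamp" : { "gte" : "now-1h" }}} <2>
+                ]
+            }
+        }
+    }
+}
+--------------------------------------------------
+<1> Static part: rounded to the start of the day, so it is reused across requests and can be cached.
+<2> Dynamic part: exact, but cheap because it only refines the documents matching the cached part.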
include::filters/and-filter.asciidoc[] diff --git a/docs/reference/query-dsl/filters/and-filter.asciidoc b/docs/reference/query-dsl/filters/and-filter.asciidoc index 043a62e68bf..9484d4bf999 100644 --- a/docs/reference/query-dsl/filters/and-filter.asciidoc +++ b/docs/reference/query-dsl/filters/and-filter.asciidoc @@ -32,40 +32,3 @@ filters. Can be placed within queries that accept a filter. } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is only cached by default if there is evidence of -reuse. It is possible to opt-in explicitely for caching by setting `_cache` -to `true`. Since the `_cache` element requires to be set on the `and` filter -itself, the structure then changes a bit to have the filters provided within a -`filters` element: - -[source,js] --------------------------------------------------- -{ - "filtered" : { - "query" : { - "term" : { "name.first" : "shay" } - }, - "filter" : { - "and" : { - "filters": [ - { - "range" : { - "postDate" : { - "from" : "2010-03-01", - "to" : "2010-04-01" - } - } - }, - { - "prefix" : { "name.second" : "ba" } - } - ], - "_cache" : true - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/bool-filter.asciidoc b/docs/reference/query-dsl/filters/bool-filter.asciidoc index c81350b032a..5f6b86021ab 100644 --- a/docs/reference/query-dsl/filters/bool-filter.asciidoc +++ b/docs/reference/query-dsl/filters/bool-filter.asciidoc @@ -12,8 +12,8 @@ accept a filter. { "filtered" : { "query" : { - "queryString" : { - "default_field" : "message", + "queryString" : { + "default_field" : "message", "query" : "elasticsearch" } }, @@ -24,7 +24,7 @@ accept a filter. }, "must_not" : { "range" : { - "age" : { "from" : 10, "to" : 20 } + "age" : { "gte" : 10, "lt" : 20 } } }, "should" : [ @@ -38,6 +38,6 @@ accept a filter. } } } -} +} -------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/geo-bounding-box-filter.asciidoc b/docs/reference/query-dsl/filters/geo-bounding-box-filter.asciidoc index 7f16ec562d9..748756d7857 100644 --- a/docs/reference/query-dsl/filters/geo-bounding-box-filter.asciidoc +++ b/docs/reference/query-dsl/filters/geo-bounding-box-filter.asciidoc @@ -230,11 +230,3 @@ are not supported. Here is an example: } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is not cached by default. The `_cache` can be -set to `true` to cache the *result* of the filter. This is handy when -the same bounding box parameters are used on several (many) other -queries. Note, the process of caching the first execution is higher when -caching (since it needs to satisfy different queries). diff --git a/docs/reference/query-dsl/filters/geo-distance-filter.asciidoc b/docs/reference/query-dsl/filters/geo-distance-filter.asciidoc index 670245a11a3..11ab6ccaa66 100644 --- a/docs/reference/query-dsl/filters/geo-distance-filter.asciidoc +++ b/docs/reference/query-dsl/filters/geo-distance-filter.asciidoc @@ -172,11 +172,3 @@ The `geo_distance` filter can work with multiple locations / points per document. Once a single location / point matches the filter, the document will be included in the filter. -[float] -==== Caching - -The result of the filter is not cached by default. The `_cache` can be -set to `true` to cache the *result* of the filter. This is handy when -the same point and distance parameters are used on several (many) other -queries. 
Note, the process of caching the first execution is higher when -caching (since it needs to satisfy different queries). diff --git a/docs/reference/query-dsl/filters/geo-polygon-filter.asciidoc b/docs/reference/query-dsl/filters/geo-polygon-filter.asciidoc index a4212343eff..22bcb3fce31 100644 --- a/docs/reference/query-dsl/filters/geo-polygon-filter.asciidoc +++ b/docs/reference/query-dsl/filters/geo-polygon-filter.asciidoc @@ -116,11 +116,3 @@ The filter *requires* the <> type to be set on the relevant field. -[float] -==== Caching - -The result of the filter is not cached by default. The `_cache` can be -set to `true` to cache the *result* of the filter. This is handy when -the same points parameters are used on several (many) other queries. -Note, the process of caching the first execution is higher when caching -(since it needs to satisfy different queries). diff --git a/docs/reference/query-dsl/filters/geo-shape-filter.asciidoc b/docs/reference/query-dsl/filters/geo-shape-filter.asciidoc index dfe06932bbd..ca1df1ea995 100644 --- a/docs/reference/query-dsl/filters/geo-shape-filter.asciidoc +++ b/docs/reference/query-dsl/filters/geo-shape-filter.asciidoc @@ -110,12 +110,3 @@ shape: } -------------------------------------------------- -[float] -==== Caching - -The result of the Filter is not cached by default. Setting `_cache` to -`true` will mean the results of the Filter will be cached. Since shapes -can contain 10s-100s of coordinates and any one differing means a new -shape, it may make sense to only using caching when you are sure that -the shapes will remain reasonably static. - diff --git a/docs/reference/query-dsl/filters/geohash-cell-filter.asciidoc b/docs/reference/query-dsl/filters/geohash-cell-filter.asciidoc index cd77803f53f..5f55936c616 100644 --- a/docs/reference/query-dsl/filters/geohash-cell-filter.asciidoc +++ b/docs/reference/query-dsl/filters/geohash-cell-filter.asciidoc @@ -61,10 +61,3 @@ next to the given cell. } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is not cached by default. The -`_cache` parameter can be set to `true` to turn caching on. -By default the filter uses the resulting geohash cells as a cache key. -This can be changed by using the `_cache_key` option. diff --git a/docs/reference/query-dsl/filters/has-child-filter.asciidoc b/docs/reference/query-dsl/filters/has-child-filter.asciidoc index 2605505a792..4802a5c07fa 100644 --- a/docs/reference/query-dsl/filters/has-child-filter.asciidoc +++ b/docs/reference/query-dsl/filters/has-child-filter.asciidoc @@ -88,9 +88,3 @@ APIS, eg: curl -XGET "http://localhost:9200/_stats/id_cache?pretty&human" -------------------------------------------------- -[float] -==== Caching - -The `has_child` filter cannot be cached in the filter cache. The `_cache` -and `_cache_key` options are a no-op in this filter. Also any filter that -wraps the `has_child` filter either directly or indirectly will not be cached. diff --git a/docs/reference/query-dsl/filters/has-parent-filter.asciidoc b/docs/reference/query-dsl/filters/has-parent-filter.asciidoc index 345e69258bc..dc708cceda3 100644 --- a/docs/reference/query-dsl/filters/has-parent-filter.asciidoc +++ b/docs/reference/query-dsl/filters/has-parent-filter.asciidoc @@ -63,9 +63,3 @@ APIS, eg: curl -XGET "http://localhost:9200/_stats/id_cache?pretty&human" -------------------------------------------------- -[float] -==== Caching - -The `has_parent` filter cannot be cached in the filter cache. 
The `_cache` -and `_cache_key` options are a no-op in this filter. Also any filter that -wraps the `has_parent` filter either directly or indirectly will not be cached. diff --git a/docs/reference/query-dsl/filters/nested-filter.asciidoc b/docs/reference/query-dsl/filters/nested-filter.asciidoc index 584e26e04f6..41e14cd00c4 100644 --- a/docs/reference/query-dsl/filters/nested-filter.asciidoc +++ b/docs/reference/query-dsl/filters/nested-filter.asciidoc @@ -2,10 +2,7 @@ === Nested Filter A `nested` filter works in a similar fashion to the -<> query, except it's -used as a filter. It follows exactly the same structure, but also allows -to cache the results (set `_cache` to `true`), and have it named (set -the `_name` value). For example: +<> query. For example: [source,js] -------------------------------------------------- @@ -26,8 +23,7 @@ the `_name` value). For example: } ] } - }, - "_cache" : true + } } } } diff --git a/docs/reference/query-dsl/filters/not-filter.asciidoc b/docs/reference/query-dsl/filters/not-filter.asciidoc index 1e2b50fac23..ed533fc6d32 100644 --- a/docs/reference/query-dsl/filters/not-filter.asciidoc +++ b/docs/reference/query-dsl/filters/not-filter.asciidoc @@ -50,33 +50,3 @@ Or, in a longer form with a `filter` element: } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is only cached if there is evidence of reuse. -The `_cache` can be set to `true` in order to cache it (though usually -not needed). Here is an example: - -[source,js] --------------------------------------------------- -{ - "filtered" : { - "query" : { - "term" : { "name.first" : "shay" } - }, - "filter" : { - "not" : { - "filter" : { - "range" : { - "postDate" : { - "from" : "2010-03-01", - "to" : "2010-04-01" - } - } - }, - "_cache" : true - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/or-filter.asciidoc b/docs/reference/query-dsl/filters/or-filter.asciidoc index c7c845c33ee..890d30f38e0 100644 --- a/docs/reference/query-dsl/filters/or-filter.asciidoc +++ b/docs/reference/query-dsl/filters/or-filter.asciidoc @@ -27,36 +27,3 @@ filters. Can be placed within queries that accept a filter. } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is only cached by default if there is evidence -of reuse. The `_cache` can be -set to `true` in order to cache it (though usually not needed). Since -the `_cache` element requires to be set on the `or` filter itself, the -structure then changes a bit to have the filters provided within a -`filters` element: - -[source,js] --------------------------------------------------- -{ - "filtered" : { - "query" : { - "term" : { "name.first" : "shay" } - }, - "filter" : { - "or" : { - "filters" : [ - { - "term" : { "name.second" : "banon" } - }, - { - "term" : { "name.nick" : "kimchy" } - } - ], - "_cache" : true - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/prefix-filter.asciidoc b/docs/reference/query-dsl/filters/prefix-filter.asciidoc index 73c13ec8fe1..964d9f42ba2 100644 --- a/docs/reference/query-dsl/filters/prefix-filter.asciidoc +++ b/docs/reference/query-dsl/filters/prefix-filter.asciidoc @@ -16,22 +16,3 @@ a filter. Can be placed within queries that accept a filter. } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is cached by default if there is evidence of reuse. 
-The `_cache` can be set to `true` in order to cache it. Here is an example: - -[source,js] --------------------------------------------------- -{ - "constant_score" : { - "filter" : { - "prefix" : { - "user" : "ki", - "_cache" : true - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/query-filter.asciidoc b/docs/reference/query-dsl/filters/query-filter.asciidoc index 2c5a7556c9a..8df0f3c3b11 100644 --- a/docs/reference/query-dsl/filters/query-filter.asciidoc +++ b/docs/reference/query-dsl/filters/query-filter.asciidoc @@ -19,34 +19,3 @@ that accept a filter. } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is only cached by default if there is evidence of reuse. - -The `_cache` can be -set to `true` to cache the *result* of the filter. This is handy when -the same query is used on several (many) other queries. Note, the -process of caching the first execution is higher when not caching (since -it needs to satisfy different queries). - -Setting the `_cache` element requires a different format for the -`query`: - -[source,js] --------------------------------------------------- -{ - "constantScore" : { - "filter" : { - "fquery" : { - "query" : { - "query_string" : { - "query" : "this AND that OR thus" - } - }, - "_cache" : true - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/range-filter.asciidoc b/docs/reference/query-dsl/filters/range-filter.asciidoc index 51d7390f1b1..0c84f91e196 100644 --- a/docs/reference/query-dsl/filters/range-filter.asciidoc +++ b/docs/reference/query-dsl/filters/range-filter.asciidoc @@ -95,11 +95,3 @@ requires more memory, so make sure you have sufficient memory on your nodes in order to use this execution mode. It usually makes sense to use it on fields you're already aggregating or sorting by. -[float] -==== Caching - -The result of the filter is only cached by default if there is evidence of reuse. The -`_cache` can be set to `false` to turn it off. - -Having the `now` expression used without rounding will make the filter unlikely to be -cached since reuse is very unlikely. diff --git a/docs/reference/query-dsl/filters/regexp-filter.asciidoc b/docs/reference/query-dsl/filters/regexp-filter.asciidoc index 1f11da47565..06a45ae0739 100644 --- a/docs/reference/query-dsl/filters/regexp-filter.asciidoc +++ b/docs/reference/query-dsl/filters/regexp-filter.asciidoc @@ -51,9 +51,7 @@ You have to enable caching explicitly in order to have the "flags" : "INTERSECTION|COMPLEMENT|EMPTY", "max_determinized_states": 20000 }, - "_name":"test", - "_cache" : true, - "_cache_key" : "key" + "_name":"test" } } } diff --git a/docs/reference/query-dsl/filters/script-filter.asciidoc b/docs/reference/query-dsl/filters/script-filter.asciidoc index f9e0cd19cee..2f49422d88a 100644 --- a/docs/reference/query-dsl/filters/script-filter.asciidoc +++ b/docs/reference/query-dsl/filters/script-filter.asciidoc @@ -43,11 +43,3 @@ to use the ability to pass parameters to the script itself, for example: } ---------------------------------------------- -[float] -==== Caching - -The result of the filter is not cached by default. The `_cache` can be -set to `true` to cache the *result* of the filter. This is handy when -the same script and parameters are used on several (many) other queries. -Note, the process of caching the first execution is higher when caching -(since it needs to satisfy different queries). 
diff --git a/docs/reference/query-dsl/filters/term-filter.asciidoc b/docs/reference/query-dsl/filters/term-filter.asciidoc index cb249a83604..768fd94ac89 100644 --- a/docs/reference/query-dsl/filters/term-filter.asciidoc +++ b/docs/reference/query-dsl/filters/term-filter.asciidoc @@ -17,22 +17,3 @@ accept a filter, for example: } -------------------------------------------------- -[float] -==== Caching - -The result of the filter is only cached by default if there is evidence of reuse. -The `_cache` can be set to `false` to turn it off. Here is an example: - -[source,js] --------------------------------------------------- -{ - "constant_score" : { - "filter" : { - "term" : { - "user" : "kimchy", - "_cache" : false - } - } - } -} --------------------------------------------------- diff --git a/docs/reference/query-dsl/filters/terms-filter.asciidoc b/docs/reference/query-dsl/filters/terms-filter.asciidoc index 19e9358a4dd..027fc174db2 100644 --- a/docs/reference/query-dsl/filters/terms-filter.asciidoc +++ b/docs/reference/query-dsl/filters/terms-filter.asciidoc @@ -18,13 +18,6 @@ Filters documents that have fields that match any of the provided terms The `terms` filter is also aliased with `in` as the filter name for simpler usage. -[float] -==== Caching - -The result of the filter is cached if there is evidence of reuse. It is -possible to enable caching explicitely by setting `_cache` to `true` and -to disable caching by setting `_cache` to `false`. - [float] ==== Terms lookup mechanism @@ -93,8 +86,7 @@ curl -XGET localhost:9200/tweets/_search -d '{ "type" : "user", "id" : "2", "path" : "followers" - }, - "_cache_key" : "user_2_friends" + } } } } @@ -102,10 +94,6 @@ curl -XGET localhost:9200/tweets/_search -d '{ }' -------------------------------------------------- -If there are lots of matching values, then `_cache_key` is recommended to be set, -so that the filter cache will not store a reference to the potentially heavy -terms filter. 
- The structure of the external terms document can also include array of inner objects, for example: diff --git a/docs/reference/query-dsl/queries.asciidoc b/docs/reference/query-dsl/queries.asciidoc index d56d2c719f1..20ce2789f7c 100644 --- a/docs/reference/query-dsl/queries.asciidoc +++ b/docs/reference/query-dsl/queries.asciidoc @@ -52,6 +52,8 @@ include::queries/range-query.asciidoc[] include::queries/regexp-query.asciidoc[] +include::queries/span-containing-query.asciidoc[] + include::queries/span-first-query.asciidoc[] include::queries/span-multi-term-query.asciidoc[] @@ -64,6 +66,8 @@ include::queries/span-or-query.asciidoc[] include::queries/span-term-query.asciidoc[] +include::queries/span-within-query.asciidoc[] + include::queries/term-query.asciidoc[] include::queries/terms-query.asciidoc[] diff --git a/docs/reference/query-dsl/queries/function-score-query.asciidoc b/docs/reference/query-dsl/queries/function-score-query.asciidoc index 32b7c0c386c..8b742bb088d 100644 --- a/docs/reference/query-dsl/queries/function-score-query.asciidoc +++ b/docs/reference/query-dsl/queries/function-score-query.asciidoc @@ -175,7 +175,8 @@ doing so would look like: "field_value_factor": { "field": "popularity", "factor": 1.2, - "modifier": "sqrt" + "modifier": "sqrt", + "missing": 1 } -------------------------------------------------- @@ -193,6 +194,8 @@ There are a number of options for the `field_value_factor` function: |`modifier` |Modifier to apply to the field value, can be one of: `none`, `log`, `log1p`, `log2p`, `ln`, `ln1p`, `ln2p`, `square`, `sqrt`, or `reciprocal`. Defaults to `none`. +|`missing` |Value used if the document doesn't have that field. The modifier +and factor are still applied to it as though it were read from the document. |======================================================================= Keep in mind that taking the log() of 0, or the square root of a negative number diff --git a/docs/reference/query-dsl/queries/match-query.asciidoc b/docs/reference/query-dsl/queries/match-query.asciidoc index e2a8178135c..2ebb8e934e2 100644 --- a/docs/reference/query-dsl/queries/match-query.asciidoc +++ b/docs/reference/query-dsl/queries/match-query.asciidoc @@ -87,10 +87,10 @@ if one of the low frequency (below the cutoff) terms in the case of an operator match. This query allows handling `stopwords` dynamically at runtime, is domain -independent and doesn't require on a stopword file. It prevent scoring / +independent and doesn't require a stopword file. It prevents scoring / iterating high frequency terms and only takes the terms into account if a -more significant / lower frequency terms match a document. Yet, if all of -the query terms are above the given `cutoff_frequency` the query is +more significant / lower frequency term matches a document. Yet, if all +of the query terms are above the given `cutoff_frequency` the query is automatically transformed into a pure conjunction (`and`) query to ensure fast execution. @@ -98,7 +98,7 @@ The `cutoff_frequency` can either be relative to the total number of documents if in the range `[0..1)` or absolute if greater or equal to `1.0`. 
-Here is an example showing a query composed of stopwords exclusivly: +Here is an example showing a query composed of stopwords exclusively: [source,js] -------------------------------------------------- diff --git a/docs/reference/query-dsl/queries/span-containing-query.asciidoc b/docs/reference/query-dsl/queries/span-containing-query.asciidoc new file mode 100644 index 00000000000..965bf855b6f --- /dev/null +++ b/docs/reference/query-dsl/queries/span-containing-query.asciidoc @@ -0,0 +1,29 @@ +[[query-dsl-span-containing-query]] +=== Span Containing Query + +Returns matches which enclose another span query. The span containing +query maps to Lucene `SpanContainingQuery`. Here is an example: + +[source,js] +-------------------------------------------------- +{ + "span_containing" : { + "little" : { + "span_term" : { "field1" : "foo" } + }, + "big" : { + "span_near" : { + "clauses" : [ + { "span_term" : { "field1" : "bar" } }, + { "span_term" : { "field1" : "baz" } } + ], + "slop" : 5, + "in_order" : true + } + } + } +} +-------------------------------------------------- + +The `big` and `little` clauses can be any span type query. Matching +spans from `big` that contain matches from `little` are returned. diff --git a/docs/reference/query-dsl/queries/span-within-query.asciidoc b/docs/reference/query-dsl/queries/span-within-query.asciidoc new file mode 100644 index 00000000000..dc5c4bbfdfd --- /dev/null +++ b/docs/reference/query-dsl/queries/span-within-query.asciidoc @@ -0,0 +1,29 @@ +[[query-dsl-span-within-query]] +=== Span Within Query + +Returns matches which are enclosed inside another span query. The span within +query maps to Lucene `SpanWithinQuery`. Here is an example: + +[source,js] +-------------------------------------------------- +{ + "span_within" : { + "little" : { + "span_term" : { "field1" : "foo" } + }, + "big" : { + "span_near" : { + "clauses" : [ + { "span_term" : { "field1" : "bar" } }, + { "span_term" : { "field1" : "baz" } } + ], + "slop" : 5, + "in_order" : true + } + } + } +} +-------------------------------------------------- + +The `big` and `little` clauses can be any span type query. Matching +spans from `little` that are enclosed within `big` are returned. 
diff --git a/docs/reference/query-dsl/queries/template-query.asciidoc b/docs/reference/query-dsl/queries/template-query.asciidoc index 6810b0663ea..0eb43b3642c 100644 --- a/docs/reference/query-dsl/queries/template-query.asciidoc +++ b/docs/reference/query-dsl/queries/template-query.asciidoc @@ -12,7 +12,7 @@ GET /_search { "query": { "template": { - "query": { "match": { "text": "{query_string}" }}}, + "query": { "match": { "text": "{{query_string}}" }}, "params" : { "query_string" : "all about search" } @@ -45,7 +45,7 @@ GET /_search { "query": { "template": { - "query": "{ \"match\": { \"text\": \"{query_string}\" }}}", <1> + "query": "{ \"match\": { \"text\": \"{{query_string}}\" }}", <1> "params" : { "query_string" : "all about search" } @@ -85,7 +85,7 @@ Alternatively, you can register a query template in the special `.scripts` index ------------------------------------------ PUT /_search/template/my_template { - "template": { "match": { "text": "{query_string}" }}}, + "template": { "match": { "text": "{{query_string}}" }} } ------------------------------------------ diff --git a/docs/reference/search.asciidoc b/docs/reference/search.asciidoc index 51b199c19f7..79d3c7a93fd 100644 --- a/docs/reference/search.asciidoc +++ b/docs/reference/search.asciidoc @@ -105,3 +105,5 @@ include::search/percolate.asciidoc[] include::search/more-like-this.asciidoc[] +include::search/field-stats.asciidoc[] + diff --git a/docs/reference/search/aggregations.asciidoc b/docs/reference/search/aggregations.asciidoc index e7803a27e9c..cf4b4348eda 100644 --- a/docs/reference/search/aggregations.asciidoc +++ b/docs/reference/search/aggregations.asciidoc @@ -68,6 +68,8 @@ Some aggregations work on values extracted from the aggregated documents. Typica a specific document field which is set using the `field` key for the aggregations. It is also possible to define a <> which will generate the values (per document). +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. + When both `field` and `script` settings are configured for the aggregation, the script will be treated as a `value script`. While normal scripts are evaluated on a document level (i.e. the script has access to all the data associated with the document), value scripts are evaluated on the *value* level. In this mode, the values are extracted @@ -116,6 +118,38 @@ aggregated for the buckets created by their "parent" bucket aggregation. There are different bucket aggregators, each with a different "bucketing" strategy. Some define a single bucket, some define fixed number of multiple buckets, and others dynamically create the buckets during the aggregation process. +[float] +=== Reducer Aggregations + +coming[2.0.0] + +experimental[] + +Reducer aggregations work on the outputs produced from other aggregations rather than from document sets, adding +information to the output tree. There are many different types of reducer, each computing different information from +other aggregations, but these types can be broken down into two families: + +_Parent_:: + A family of reducer aggregations that is provided with the output of its parent aggregation and is able + to compute new buckets or new aggregations to add to existing buckets. + +_Sibling_:: + Reducer aggregations that are provided with the output of a sibling aggregation and are able to compute a + new aggregation which will be at the same level as the sibling aggregation (see the sketch below).
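+As a sketch only (reducer names and syntax are experimental and may change), a
+parent reducer such as the moving average whose behaviour the `reducers_movavg`
+images in this change illustrate might be embedded inside a `date_histogram`
+and reference a sibling metric via the `buckets_path` parameter described below;
+all names here are illustrative:
+
+[source,js]
+--------------------------------------------------
+{
+    "my_date_histo" : {
+        "date_histogram" : { "field" : "timestamp", "interval" : "day" },
+        "aggs" : {
+            "the_sum" : { "sum" : { "field" : "price" } },
+            "the_movavg" : {
+                "moving_avg" : { "buckets_path" : "the_sum" }
+            }
+        }
+    }
+}
+--------------------------------------------------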
+ +Reducer aggregations can reference the aggregations they need to perform their computation by using the `buckets_path` +parameter to indicate the paths to the required metrics. The syntax for defining these paths can be found in the +<> section. + +Reducer aggregations cannot have sub-aggregations, but depending on the type they can reference another reducer in the `buckets_path`, +allowing reducers to be chained. + +NOTE: Because reducer aggregations only add to the output, when chaining reducer aggregations the output of each reducer will be +included in the final output. + [float] === Caching heavy aggregations @@ -129,7 +163,7 @@ See <> for more details. [float] === Returning only aggregation results -There are many occasions when aggregations are required but search hits are not. For these cases the hits can be ignored by +There are many occasions when aggregations are required but search hits are not. For these cases the hits can be ignored by setting `size=0`. For example: [source,js] @@ -195,3 +229,6 @@ Then that piece of metadata will be returned in place for our `titles` terms agg include::aggregations/metrics.asciidoc[] include::aggregations/bucket.asciidoc[] + +include::aggregations/reducer.asciidoc[] + diff --git a/docs/reference/search/aggregations/bucket/datehistogram-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/datehistogram-aggregation.asciidoc index 999a933f91d..256ef62d766 100644 --- a/docs/reference/search/aggregations/bucket/datehistogram-aggregation.asciidoc +++ b/docs/reference/search/aggregations/bucket/datehistogram-aggregation.asciidoc @@ -119,7 +119,7 @@ Response: Like with the normal <>, both document level scripts and value level scripts are supported. It is also possible to control the order of the returned buckets using the `order` -settings and filter the returned buckets based on a `min_doc_count` setting (by default all buckets with -`min_doc_count > 0` will be returned). This histogram also supports the `extended_bounds` setting, which enables extending -the bounds of the histogram beyond the data itself (to read more on why you'd want to do that please refer to the -explanation <>). +settings and filter the returned buckets based on a `min_doc_count` setting (by default all buckets between the first +bucket that matches documents and the last one are returned). This histogram also supports the `extended_bounds` +setting, which enables extending the bounds of the histogram beyond the data itself (to read more on why you'd want to +do that please refer to the explanation <>). diff --git a/docs/reference/search/aggregations/bucket/histogram-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/histogram-aggregation.asciidoc index 545bc24c767..cd1fd06ddaf 100644 --- a/docs/reference/search/aggregations/bucket/histogram-aggregation.asciidoc +++ b/docs/reference/search/aggregations/bucket/histogram-aggregation.asciidoc @@ -50,6 +50,10 @@ And the following may be the response: "key": 50, "doc_count": 4 }, + { + "key": 100, + "doc_count": 0 + }, { "key": 150, "doc_count": 3 @@ -60,10 +64,11 @@ And the following may be the response: } -------------------------------------------------- -The response above shows that none of the aggregated products has a price that falls within the range of `[100 - 150)`.
-By default, the response will only contain those buckets with a `doc_count` greater than 0. It is possible change that -and request buckets with either a higher minimum count or even 0 (in which case elasticsearch will "fill in the gaps" -and create buckets with zero documents). This can be configured using the `min_doc_count` setting: +==== Minimum document count + +The response above shows that no documents have a price that falls within the range of `[100 - 150)`. By default the +response will fill gaps in the histogram with empty buckets. It is possible to change that and request buckets with +a higher minimum count using the `min_doc_count` setting: [source,js] -------------------------------------------------- @@ -73,7 +78,7 @@ and create buckets with zero documents). This can be configured using the `min_d "histogram" : { "field" : "price", "interval" : 50, - "min_doc_count" : 0 + "min_doc_count" : 1 } } } @@ -96,10 +101,6 @@ Response: "key": 50, "doc_count": 4 }, - { - "key" : 100, - "doc_count" : 0 <1> - }, { "key": 150, "doc_count": 3 @@ -110,13 +111,11 @@ Response: } -------------------------------------------------- -<1> No documents were found that belong in this bucket, yet it is still returned with zero `doc_count`. - [[search-aggregations-bucket-histogram-aggregation-extended-bounds]] By default the date_/histogram returns all the buckets within the range of the data itself, that is, the documents with the smallest values (on which with histogram) will determine the min bucket (the bucket with the smallest key) and the documents with the highest values will determine the max bucket (the bucket with the highest key). Often, when when -requesting empty buckets (`"min_doc_count" : 0`), this causes a confusion, specifically, when the data is also filtered. +requesting empty buckets, this can cause confusion, specifically when the data is also filtered. To understand why, let's look at an example: @@ -149,7 +148,6 @@ Example: "histogram" : { "field" : "price", "interval" : 50, - "min_doc_count" : 0, "extended_bounds" : { "min" : 0, "max" : 500 @@ -265,67 +263,6 @@ PATH := []*[ experimental[] The `precision_threshold` options allows to trade memory for accuracy, and +<1> The `precision_threshold` option allows to trade memory for accuracy, and defines a unique count below which counts are expected to be close to accurate. Above this value, counts might become a bit more fuzzy. The maximum supported value is 40000, thresholds above this number will have the same effect as a threshold of 40000. Default value depends on the number of parent aggregations that multiple create buckets (such as terms or histograms). -<2> experimental[] If you computed a hash on client-side, stored it into your documents and want +<2> If you computed hashes on the client side, stored them into your documents and want Elasticsearch to use them to compute counts using this hash function without rehashing values, it is possible to specify `rehash: false`. Default value is `true`. Please note that the hash must be indexed as a long when `rehash` is @@ -152,3 +152,6 @@ however since hashes need to be computed on the fly. } } -------------------------------------------------- + +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory.
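+For instance, the file-based variant might look like this, assuming a script
+saved as `config/scripts/author_name.groovy` on every node that concatenates
+`doc['author.first_name'].value` and `doc['author.last_name'].value`:
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "author_count" : {
+            "cardinality" : {
+                "script_file" : "author_name"
+            }
+        }
+    }
+}
+--------------------------------------------------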
+ diff --git a/docs/reference/search/aggregations/metrics/extendedstats-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/extendedstats-aggregation.asciidoc index 73340af8e67..07d25fac65d 100644 --- a/docs/reference/search/aggregations/metrics/extendedstats-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/extendedstats-aggregation.asciidoc @@ -91,6 +91,8 @@ Computing the grades stats based on a script: } -------------------------------------------------- +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. + ===== Value Script It turned out that the exam was way above the level of the students and a grade correction needs to be applied. We can use value script to get the new stats: diff --git a/docs/reference/search/aggregations/metrics/geobounds-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/geobounds-aggregation.asciidoc index 548436b93b6..ade59477ee3 100644 --- a/docs/reference/search/aggregations/metrics/geobounds-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/geobounds-aggregation.asciidoc @@ -1,8 +1,6 @@ [[search-aggregations-metrics-geobounds-aggregation]] === Geo Bounds Aggregation -experimental[] - A metric aggregation that computes the bounding box containing all geo_point values for a field. diff --git a/docs/reference/search/aggregations/metrics/max-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/max-aggregation.asciidoc index 0a2481e6d2d..facefc1201d 100644 --- a/docs/reference/search/aggregations/metrics/max-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/max-aggregation.asciidoc @@ -44,6 +44,7 @@ Computing the max price value across all document, this time using a script: } -------------------------------------------------- +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. ==== Value Script diff --git a/docs/reference/search/aggregations/metrics/min-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/min-aggregation.asciidoc index 57dbceaa585..1383cc08322 100644 --- a/docs/reference/search/aggregations/metrics/min-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/min-aggregation.asciidoc @@ -44,6 +44,7 @@ Computing the min price value across all document, this time using a script: } -------------------------------------------------- +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. ==== Value Script diff --git a/docs/reference/search/aggregations/metrics/percentile-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/percentile-aggregation.asciidoc index bc5d6b3b560..6bd10110077 100644 --- a/docs/reference/search/aggregations/metrics/percentile-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/percentile-aggregation.asciidoc @@ -113,6 +113,8 @@ a script to convert them on-the-fly: script to generate values which percentiles are calculated on <2> Scripting supports parameterized input just like any other script +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. 
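+
+Similarly, a minimal sketch of a percentiles request that loads its script from disk instead (the aggregation name,
+the file name `calculate_load_time.groovy` and the `timeUnit` parameter are illustrative, assuming the file lives in
+`config/scripts/`):
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "load_time_outlier" : {
+            "percentiles" : {
+                "script_file" : "calculate_load_time",
+                "params" : {
+                    "timeUnit" : 1000
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------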
+
 [[search-aggregations-metrics-percentile-aggregation-approximation]]
 ==== Percentiles are (usually) approximate

@@ -153,7 +155,7 @@ it. It would not be the case on more skewed distributions.

 [[search-aggregations-metrics-percentile-aggregation-compression]]
 ==== Compression

-experimental[]
+experimental[The `compression` parameter is specific to the current internal implementation of percentiles, and may change in the future]

 Approximate algorithms must balance memory utilization with estimation
 accuracy. This balance can be controlled using a `compression` parameter:
diff --git a/docs/reference/search/aggregations/metrics/percentile-rank-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/percentile-rank-aggregation.asciidoc
index 0fd51f723b8..d327fc66303 100644
--- a/docs/reference/search/aggregations/metrics/percentile-rank-aggregation.asciidoc
+++ b/docs/reference/search/aggregations/metrics/percentile-rank-aggregation.asciidoc
@@ -8,16 +8,16 @@ be generated by a provided script.

 [NOTE]
 ==================================================
-Please see <>
-and <> for advice
+Please see <>
+and <> for advice
 regarding approximation and memory use of the percentile ranks aggregation
 ==================================================

-Percentile rank show the percentage of observed values which are below certain
+Percentile ranks show the percentage of observed values which are below a certain
 value. For example, if a value is greater than or equal to 95% of the observed
 values it is said to be at the 95th percentile rank.

-Assume your data consists of website load times. You may have a service agreement that
+Assume your data consists of website load times. You may have a service agreement that
 95% of page loads completely within 15ms and 99% of page loads complete within 30ms.

 Let's look at a range of percentiles representing load time:

@@ -55,7 +55,7 @@ The response will look like this:
 }
--------------------------------------------------

-From this information you can determine you are hitting the 99% load time target but not quite
+From this information you can determine you are hitting the 99% load time target but not quite
 hitting the 95% load time target

@@ -84,3 +84,5 @@ a script to convert them on-the-fly:
 <1> The `field` parameter is replaced with a `script` parameter, which uses the
 script to generate values which percentile ranks are calculated on
 <2> Scripting supports parameterized input just like any other script
+
+TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory.
diff --git a/docs/reference/search/aggregations/metrics/scripted-metric-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/scripted-metric-aggregation.asciidoc index 18dd6f280f9..a775d545409 100644 --- a/docs/reference/search/aggregations/metrics/scripted-metric-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/scripted-metric-aggregation.asciidoc @@ -84,24 +84,28 @@ $ curl -XPUT 'http://localhost:9200/transactions/stock/1' -d ' "type": "sale", "amount": 80 } +' $ curl -XPUT 'http://localhost:9200/transactions/stock/2' -d ' { "type": "cost", "amount": 10 } +' $ curl -XPUT 'http://localhost:9200/transactions/stock/3' -d ' { "type": "cost", "amount": 30 } +' $ curl -XPUT 'http://localhost:9200/transactions/stock/4' -d ' { "type": "sale", "amount": 130 } +' -------------------------------------------------- Lets say that documents 1 and 3 end up on shard A and documents 2 and 4 end up on shard B. The following is a breakdown of what the aggregation result is diff --git a/docs/reference/search/aggregations/metrics/stats-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/stats-aggregation.asciidoc index 486182c9ef6..7fbdecd6011 100644 --- a/docs/reference/search/aggregations/metrics/stats-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/stats-aggregation.asciidoc @@ -53,6 +53,8 @@ Computing the grades stats based on a script: } -------------------------------------------------- +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. + ===== Value Script It turned out that the exam was way above the level of the students and a grade correction needs to be applied. We can use a value script to get the new stats: diff --git a/docs/reference/search/aggregations/metrics/sum-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/sum-aggregation.asciidoc index 0c136490662..8857ff306ee 100644 --- a/docs/reference/search/aggregations/metrics/sum-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/sum-aggregation.asciidoc @@ -55,6 +55,8 @@ Computing the intraday return based on a script: } -------------------------------------------------- +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. + ===== Value Script Computing the sum of squares over all stock tick changes: diff --git a/docs/reference/search/aggregations/metrics/valuecount-aggregation.asciidoc b/docs/reference/search/aggregations/metrics/valuecount-aggregation.asciidoc index 900b8bca83d..ed5e23ee339 100644 --- a/docs/reference/search/aggregations/metrics/valuecount-aggregation.asciidoc +++ b/docs/reference/search/aggregations/metrics/valuecount-aggregation.asciidoc @@ -34,6 +34,7 @@ The name of the aggregation (`grades_count` above) also serves as the key by whi retrieved from the returned response. ==== Script + Counting the values generated by a script: [source,js] @@ -46,3 +47,5 @@ Counting the values generated by a script: } } -------------------------------------------------- + +TIP: The `script` parameter expects an inline script. Use `script_id` for indexed scripts and `script_file` for scripts in the `config/scripts/` directory. 
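+
+As a rough sketch of how the indexed scripts referenced by `script_id` get stored in the first place (the script id
+and script body are illustrative), an indexed script can be registered through the indexed scripts API and then
+referenced by id from any of the aggregations above:
+
+[source,js]
+--------------------------------------------------
+curl -XPOST "http://localhost:9200/_scripts/groovy/my_script" -d '
+{
+    "script": "doc[\"grade\"].value * 1.2"
+}'
+--------------------------------------------------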
diff --git a/docs/reference/search/aggregations/reducer.asciidoc b/docs/reference/search/aggregations/reducer.asciidoc
new file mode 100644
index 00000000000..a725bc77e38
--- /dev/null
+++ b/docs/reference/search/aggregations/reducer.asciidoc
@@ -0,0 +1,6 @@
+[[search-aggregations-reducer]]
+
+include::reducer/derivative-aggregation.asciidoc[]
+include::reducer/max-bucket-aggregation.asciidoc[]
+include::reducer/min-bucket-aggregation.asciidoc[]
+include::reducer/movavg-aggregation.asciidoc[]
diff --git a/docs/reference/search/aggregations/reducer/derivative-aggregation.asciidoc b/docs/reference/search/aggregations/reducer/derivative-aggregation.asciidoc
new file mode 100644
index 00000000000..be644091b51
--- /dev/null
+++ b/docs/reference/search/aggregations/reducer/derivative-aggregation.asciidoc
@@ -0,0 +1,194 @@
+[[search-aggregations-reducer-derivative-aggregation]]
+=== Derivative Aggregation
+
+A parent reducer aggregation which calculates the derivative of a specified metric in a parent histogram (or date_histogram)
+aggregation. The specified metric must be numeric and the enclosing histogram must have `min_doc_count` set to `0` (default
+for `histogram` aggregations).
+
+The following snippet calculates the derivative of the total monthly `sales`:
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "sales_per_month" : {
+            "date_histogram" : {
+                "field" : "date",
+                "interval" : "month"
+            },
+            "aggs": {
+                "sales": {
+                    "sum": {
+                        "field": "price"
+                    }
+                },
+                "sales_deriv": {
+                    "derivative": {
+                        "buckets_paths": "sales" <1>
+                    }
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------
+
+<1> `buckets_paths` instructs this derivative aggregation to use the output of the `sales` aggregation for the derivative
+
+And the following may be the response:
+
+[source,js]
+--------------------------------------------------
+{
+   "aggregations": {
+      "sales_per_month": {
+         "buckets": [
+            {
+               "key_as_string": "2015/01/01 00:00:00",
+               "key": 1420070400000,
+               "doc_count": 3,
+               "sales": {
+                  "value": 550
+               } <1>
+            },
+            {
+               "key_as_string": "2015/02/01 00:00:00",
+               "key": 1422748800000,
+               "doc_count": 2,
+               "sales": {
+                  "value": 60
+               },
+               "sales_deriv": {
+                  "value": -490 <2>
+               }
+            },
+            {
+               "key_as_string": "2015/03/01 00:00:00",
+               "key": 1425168000000,
+               "doc_count": 2, <3>
+               "sales": {
+                  "value": 375
+               },
+               "sales_deriv": {
+                  "value": 315
+               }
+            }
+         ]
+      }
+   }
+}
+--------------------------------------------------
+
+<1> No derivative for the first bucket since we need at least 2 data points to calculate the derivative
+<2> Derivative value units are implicitly defined by the `sales` aggregation and the parent histogram so in this case the units
+would be $/month assuming the `price` field has units of $.
+<3> The number of documents in the bucket is represented by the `doc_count` value
+
+==== Second Order Derivative
+
+A second order derivative can be calculated by chaining the derivative reducer aggregation onto the result of another derivative
+reducer aggregation, as in the following example, which will calculate both the first and the second order derivative of the total
+monthly sales:
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "sales_per_month" : {
+            "date_histogram" : {
+                "field" : "date",
+                "interval" : "month"
+            },
+            "aggs": {
+                "sales": {
+                    "sum": {
+                        "field": "price"
+                    }
+                },
+                "sales_deriv": {
+                    "derivative": {
+                        "buckets_paths": "sales"
+                    }
+                },
+                "sales_2nd_deriv": {
+                    "derivative": {
+                        "buckets_paths": "sales_deriv" <1>
+                    }
+                }
+            }
+        }
+    }
+}
+--------------------------------------------------
+
+<1> `buckets_paths` for the second derivative points to the name of the first derivative
+
+And the following may be the response:
+
+[source,js]
+--------------------------------------------------
+{
+   "aggregations": {
+      "sales_per_month": {
+         "buckets": [
+            {
+               "key_as_string": "2015/01/01 00:00:00",
+               "key": 1420070400000,
+               "doc_count": 3,
+               "sales": {
+                  "value": 550
+               } <1>
+            },
+            {
+               "key_as_string": "2015/02/01 00:00:00",
+               "key": 1422748800000,
+               "doc_count": 2,
+               "sales": {
+                  "value": 60
+               },
+               "sales_deriv": {
+                  "value": -490
+               } <1>
+            },
+            {
+               "key_as_string": "2015/03/01 00:00:00",
+               "key": 1425168000000,
+               "doc_count": 2,
+               "sales": {
+                  "value": 375
+               },
+               "sales_deriv": {
+                  "value": 315
+               },
+               "sales_2nd_deriv": {
+                  "value": 805
+               }
+            }
+         ]
+      }
+   }
+}
+--------------------------------------------------
+<1> No second derivative for the first two buckets since we need at least 2 data points from the first derivative to calculate the
+second derivative
+
+==== Dealing with gaps in the data
+
+There are a couple of reasons why the data output by the enclosing histogram may have gaps:
+
+* There are no documents matching the query for some buckets
+* The data for a metric is missing in all of the documents falling into a bucket (this is most likely with either a small interval
+on the enclosing histogram or with a query matching only a small number of documents)
+
+Where there is no data available in a bucket for a given metric, it presents a problem for calculating the derivative value for both
+the current bucket and the next bucket. The derivative reducer aggregation has a `gap_policy` parameter to define what the behavior
+should be when a gap in the data is found. There are currently two options for controlling the gap policy:
+
+_ignore_::
+    This option will not produce a derivative value for any buckets where the value in the current or previous bucket is
+    missing
+
+_insert_zeros_::
+    This option will assume the missing value is `0` and calculate the derivative with the value `0`.
+
+
diff --git a/docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc b/docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc
new file mode 100644
index 00000000000..a93c7ed8036
--- /dev/null
+++ b/docs/reference/search/aggregations/reducer/max-bucket-aggregation.asciidoc
@@ -0,0 +1,82 @@
+[[search-aggregations-reducer-max-bucket-aggregation]]
+=== Max Bucket Aggregation
+
+A sibling reducer aggregation which identifies the bucket(s) with the maximum value of a specified metric in a sibling aggregation
+and outputs both the value and the key(s) of the bucket(s).
The specified metric must be numeric and the sibling aggregation must
+be a multi-bucket aggregation.
+
+The following snippet calculates the maximum of the total monthly `sales`:
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "sales_per_month" : {
+            "date_histogram" : {
+                "field" : "date",
+                "interval" : "month"
+            },
+            "aggs": {
+                "sales": {
+                    "sum": {
+                        "field": "price"
+                    }
+                }
+            }
+        },
+        "max_monthly_sales": {
+            "max_bucket": {
+                "buckets_paths": "sales_per_month>sales" <1>
+            }
+        }
+    }
+}
+--------------------------------------------------
+
+<1> `buckets_paths` instructs this max_bucket aggregation that we want the maximum value of the `sales` aggregation in the
+`sales_per_month` date histogram.
+
+And the following may be the response:
+
+[source,js]
+--------------------------------------------------
+{
+   "aggregations": {
+      "sales_per_month": {
+         "buckets": [
+            {
+               "key_as_string": "2015/01/01 00:00:00",
+               "key": 1420070400000,
+               "doc_count": 3,
+               "sales": {
+                  "value": 550
+               }
+            },
+            {
+               "key_as_string": "2015/02/01 00:00:00",
+               "key": 1422748800000,
+               "doc_count": 2,
+               "sales": {
+                  "value": 60
+               }
+            },
+            {
+               "key_as_string": "2015/03/01 00:00:00",
+               "key": 1425168000000,
+               "doc_count": 2,
+               "sales": {
+                  "value": 375
+               }
+            }
+         ]
+      },
+      "max_monthly_sales": {
+          "keys": ["2015/01/01 00:00:00"], <1>
+          "value": 550
+      }
+   }
+}
+--------------------------------------------------
+
+<1> `keys` is an array of strings since the maximum value may be present in multiple buckets
+
diff --git a/docs/reference/search/aggregations/reducer/min-bucket-aggregation.asciidoc b/docs/reference/search/aggregations/reducer/min-bucket-aggregation.asciidoc
new file mode 100644
index 00000000000..558d0c19983
--- /dev/null
+++ b/docs/reference/search/aggregations/reducer/min-bucket-aggregation.asciidoc
@@ -0,0 +1,82 @@
+[[search-aggregations-reducer-min-bucket-aggregation]]
+=== Min Bucket Aggregation
+
+A sibling reducer aggregation which identifies the bucket(s) with the minimum value of a specified metric in a sibling aggregation
+and outputs both the value and the key(s) of the bucket(s). The specified metric must be numeric and the sibling aggregation must
+be a multi-bucket aggregation.
+
+The following snippet calculates the minimum of the total monthly `sales`:
+
+[source,js]
+--------------------------------------------------
+{
+    "aggs" : {
+        "sales_per_month" : {
+            "date_histogram" : {
+                "field" : "date",
+                "interval" : "month"
+            },
+            "aggs": {
+                "sales": {
+                    "sum": {
+                        "field": "price"
+                    }
+                }
+            }
+        },
+        "min_monthly_sales": {
+            "min_bucket": {
+                "buckets_paths": "sales_per_month>sales" <1>
+            }
+        }
+    }
+}
+--------------------------------------------------
+
+<1> `buckets_paths` instructs this min_bucket aggregation that we want the minimum value of the `sales` aggregation in the
+`sales_per_month` date histogram.
+
+And the following may be the response:
+
+[source,js]
+--------------------------------------------------
+{
+   "aggregations": {
+      "sales_per_month": {
+         "buckets": [
+            {
+               "key_as_string": "2015/01/01 00:00:00",
+               "key": 1420070400000,
+               "doc_count": 3,
+               "sales": {
+                  "value": 550
+               }
+            },
+            {
+               "key_as_string": "2015/02/01 00:00:00",
+               "key": 1422748800000,
+               "doc_count": 2,
+               "sales": {
+                  "value": 60
+               }
+            },
+            {
+               "key_as_string": "2015/03/01 00:00:00",
+               "key": 1425168000000,
+               "doc_count": 2,
+               "sales": {
+                  "value": 375
+               }
+            }
+         ]
+      },
+      "min_monthly_sales": {
+          "keys": ["2015/02/01 00:00:00"], <1>
+          "value": 60
+      }
+   }
+}
+--------------------------------------------------
+
+<1> `keys` is an array of strings since the minimum value may be present in multiple buckets
+
diff --git a/docs/reference/search/aggregations/reducer/movavg-aggregation.asciidoc b/docs/reference/search/aggregations/reducer/movavg-aggregation.asciidoc
new file mode 100644
index 00000000000..03f6b7e9fa1
--- /dev/null
+++ b/docs/reference/search/aggregations/reducer/movavg-aggregation.asciidoc
@@ -0,0 +1,294 @@
+[[search-aggregations-reducers-movavg-reducer]]
+=== Moving Average Aggregation
+
+Given an ordered series of data, the Moving Average aggregation will slide a window across the data and emit the average
+value of that window. For example, given the data `[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]`, we can calculate a simple moving
+average with window size of `5` as follows:
+
+- (1 + 2 + 3 + 4 + 5) / 5 = 3
+- (2 + 3 + 4 + 5 + 6) / 5 = 4
+- (3 + 4 + 5 + 6 + 7) / 5 = 5
+- etc
+
+Moving averages are a simple method to smooth sequential data. Moving averages are typically applied to time-based data,
+such as stock prices or server metrics. The smoothing can be used to eliminate high frequency fluctuations or random noise,
+which allows the lower frequency trends to be more easily visualized, such as seasonality.
+
+==== Syntax
+
+A `moving_avg` aggregation looks like this in isolation:
+
+[source,js]
+--------------------------------------------------
+{
+    "moving_avg": {
+        "buckets_path": "the_sum",
+        "model": "double_exp",
+        "window": 5,
+        "gap_policy": "insert_zero",
+        "settings": {
+            "alpha": 0.8
+        }
+    }
+}
+--------------------------------------------------
+
+.`moving_avg` Parameters
+|===
+|Parameter Name |Description |Required |Default
+
+|`buckets_path` |The path to the metric that we wish to calculate a moving average for |Required |
+|`model` |The moving average weighting model that we wish to use |Optional |`simple`
+|`gap_policy` |Determines what should happen when a gap in the data is encountered. |Optional |`insert_zero`
+|`window` |The size of the window to "slide" across the histogram. |Optional |`5`
+|`settings` |Model-specific settings, the contents of which differ depending on the model specified. |Optional |
+|===
+
+
+`moving_avg` aggregations must be embedded inside of a `histogram` or `date_histogram` aggregation. They can be
+embedded like any other metric aggregation:
+
+[source,js]
+--------------------------------------------------
+{
+    "my_date_histo":{ <1>
+        "date_histogram":{
+            "field":"timestamp",
+            "interval":"day"
+        },
+        "aggs":{
+            "the_sum":{
+                "sum":{ "field": "lemmings" } <2>
+            },
+            "the_movavg":{
+                "moving_avg":{ "buckets_path": "the_sum" } <3>
+            }
+        }
+    }
+}
+--------------------------------------------------
+<1> A `date_histogram` named "my_date_histo" is constructed on the "timestamp" field, with one-day intervals
+<2> A `sum` metric is used to calculate the sum of a field.
This could be any metric (sum, min, max, etc)
+<3> Finally, we specify a `moving_avg` aggregation which uses "the_sum" metric as its input.
+
+Moving averages are built by first specifying a `histogram` or `date_histogram` over a field. You can then optionally
+add normal metrics, such as a `sum`, inside of that histogram. Finally, the `moving_avg` is embedded inside the histogram.
+The `buckets_path` parameter is then used to "point" at one of the sibling metrics inside of the histogram.
+
+A moving average can also be calculated on the document count of each bucket, instead of a metric:
+
+[source,js]
+--------------------------------------------------
+{
+    "my_date_histo":{
+        "date_histogram":{
+            "field":"timestamp",
+            "interval":"day"
+        },
+        "aggs":{
+            "the_movavg":{
+                "moving_avg":{ "buckets_path": "_count" } <1>
+            }
+        }
+    }
+}
+--------------------------------------------------
+<1> By using `_count` instead of a metric name, we can calculate the moving average of document counts in the histogram
+
+==== Models
+
+The `moving_avg` aggregation includes four different moving average "models". The main difference is how the values in the
+window are weighted. As data-points become "older" in the window, they may be weighted differently. This will
+affect the final average for that window.
+
+Models are specified using the `model` parameter. Some models may have optional configurations which are specified inside
+the `settings` parameter.
+
+===== Simple
+
+The `simple` model calculates the sum of all values in the window, then divides by the size of the window. It is effectively
+a simple arithmetic mean of the window. The simple model does not perform any time-dependent weighting, which means
+the values from a `simple` moving average tend to "lag" behind the real data.
+
+[source,js]
+--------------------------------------------------
+{
+    "the_movavg":{
+        "moving_avg":{
+            "buckets_path": "the_sum",
+            "model" : "simple"
+        }
+    }
+}
+--------------------------------------------------
+
+A `simple` model has no special settings to configure.
+
+The window size can change the behavior of the moving average. For example, a small window (`"window": 10`) will closely
+track the data and only smooth out small scale fluctuations:
+
+[[movavg_10window]]
+.Moving average with window of size 10
+image::images/reducers_movavg/movavg_10window.png[]
+
+In contrast, a `simple` moving average with larger window (`"window": 100`) will smooth out all higher-frequency fluctuations,
+leaving only low-frequency, long term trends. It also tends to "lag" behind the actual data by a substantial amount:
+
+[[movavg_100window]]
+.Moving average with window of size 100
+image::images/reducers_movavg/movavg_100window.png[]
+
+
+===== Linear
+
+The `linear` model assigns a linear weighting to points in the series, such that "older" datapoints (e.g. those at
+the beginning of the window) contribute linearly less to the total average. The linear weighting helps reduce
+the "lag" behind the data's mean, since older points have less influence.
+
+[source,js]
+--------------------------------------------------
+{
+    "the_movavg":{
+        "moving_avg":{
+            "buckets_path": "the_sum",
+            "model" : "linear"
+        }
+    }
+}
+--------------------------------------------------
+
+A `linear` model has no special settings to configure.
+
+Like the `simple` model, window size can change the behavior of the moving average.
For example, a small window (`"window": 10`)
+will closely track the data and only smooth out small scale fluctuations:
+
+[[linear_10window]]
+.Linear moving average with window of size 10
+image::images/reducers_movavg/linear_10window.png[]
+
+In contrast, a `linear` moving average with larger window (`"window": 100`) will smooth out all higher-frequency fluctuations,
+leaving only low-frequency, long term trends. It also tends to "lag" behind the actual data by a substantial amount,
+although typically less than the `simple` model:
+
+[[linear_100window]]
+.Linear moving average with window of size 100
+image::images/reducers_movavg/linear_100window.png[]
+
+===== Single Exponential
+
+The `single_exp` model is similar to the `linear` model, except older data-points become exponentially less important,
+rather than linearly less important. The speed at which the importance decays can be controlled with an `alpha`
+setting. Small values make the weight decay slowly, which provides greater smoothing and takes into account a larger
+portion of the window. Larger values make the weight decay quickly, which reduces the impact of older values on the
+moving average. This tends to make the moving average track the data more closely but with less smoothing.
+
+The default value of `alpha` is `0.5`, and the setting accepts any float from 0-1 inclusive.
+
+[source,js]
+--------------------------------------------------
+{
+    "the_movavg":{
+        "moving_avg":{
+            "buckets_path": "the_sum",
+            "model" : "single_exp",
+            "settings" : {
+                "alpha" : 0.5
+            }
+        }
+    }
+}
+--------------------------------------------------
+
+
+
+[[single_0.2alpha]]
+.Single Exponential moving average with window of size 10, alpha = 0.2
+image::images/reducers_movavg/single_0.2alpha.png[]
+
+[[single_0.7alpha]]
+.Single Exponential moving average with window of size 10, alpha = 0.7
+image::images/reducers_movavg/single_0.7alpha.png[]
+
+===== Double Exponential
+
+The `double_exp` model, sometimes called "Holt's Linear Trend" model, incorporates a second exponential term which
+tracks the data's trend. Single exponential does not perform well when the data has an underlying linear trend. The
+double exponential model calculates two values internally: a "level" and a "trend".
+
+The level calculation is similar to `single_exp`, and is an exponentially weighted view of the data. The difference is
+that the previously smoothed value is used instead of the raw value, which allows it to stay close to the original series.
+The trend calculation looks at the difference between the current and last value (e.g. the slope, or trend, of the
+smoothed data). The trend value is also exponentially weighted.
+
+Values are produced by multiplying the level and trend components.
+
+The default value of `alpha` and `beta` is `0.5`, and the settings accept any float from 0-1 inclusive.
+
+[source,js]
+--------------------------------------------------
+{
+    "the_movavg":{
+        "moving_avg":{
+            "buckets_path": "the_sum",
+            "model" : "double_exp",
+            "settings" : {
+                "alpha" : 0.5,
+                "beta" : 0.5
+            }
+        }
+    }
+}
+--------------------------------------------------
+
+In practice, the `alpha` value behaves very similarly in `double_exp` as in `single_exp`: small values produce more smoothing
+and more lag, while larger values produce closer tracking and less lag. The effect of `beta` is often difficult
+to see. Small values emphasize long-term trends (such as a constant linear trend in the whole series), while larger
+values emphasize short-term trends.
This will become more apparent when you are predicting values.
+
+[[double_0.2beta]]
+.Double Exponential moving average with window of size 100, alpha = 0.5, beta = 0.2
+image::images/reducers_movavg/double_0.2beta.png[]
+
+[[double_0.7beta]]
+.Double Exponential moving average with window of size 100, alpha = 0.5, beta = 0.7
+image::images/reducers_movavg/double_0.7beta.png[]
+
+==== Prediction
+
+All the moving average models support a "prediction" mode, which will attempt to extrapolate into the future given the
+current smoothed moving average. Depending on the model and parameter, these predictions may or may not be accurate.
+
+Predictions are enabled by adding a `predict` parameter to any moving average aggregation, specifying the number of
+predictions you would like appended to the end of the series. These predictions will be spaced out at the same interval
+as your buckets:
+
+[source,js]
+--------------------------------------------------
+{
+    "the_movavg":{
+        "moving_avg":{
+            "buckets_path": "the_sum",
+            "model" : "simple",
+            "predict" : 10
+        }
+    }
+}
+--------------------------------------------------
+
+The `simple`, `linear` and `single_exp` models all produce "flat" predictions: they essentially converge on the mean
+of the last value in the series, producing a flat line:
+
+[[simple_prediction]]
+.Simple moving average with window of size 10, predict = 50
+image::images/reducers_movavg/simple_prediction.png[]
+
+In contrast, the `double_exp` model can extrapolate based on local or global constant trends. If we set a high `beta`
+value, we can extrapolate based on local constant trends (in this case the predictions head down, because the data at the end
+of the series was heading in a downward direction):
+
+[[double_prediction_local]]
+.Double Exponential moving average with window of size 100, predict = 20, alpha = 0.5, beta = 0.8
+image::images/reducers_movavg/double_prediction_local.png[]
+
+In contrast, if we choose a small `beta`, the predictions are based on the global constant trend. In this series, the
+global trend is slightly positive, so the prediction makes a sharp u-turn and begins a positive slope:
+
+[[double_prediction_global]]
+.Double Exponential moving average with window of size 100, predict = 20, alpha = 0.5, beta = 0.1
+image::images/reducers_movavg/double_prediction_global.png[]
diff --git a/docs/reference/search/count.asciidoc b/docs/reference/search/count.asciidoc
index a04c3950987..258e3c94b40 100644
--- a/docs/reference/search/count.asciidoc
+++ b/docs/reference/search/count.asciidoc
@@ -64,7 +64,8 @@ query.
|default_operator |The default operator to be used, can be `AND` or
`OR`. Defaults to `OR`.

-|terminate_after |experimental[] The maximum count for each shard, upon
+|terminate_after |experimental[The API for this feature may change in the future]
+The maximum count for each shard, upon
reaching which the query execution will terminate early. If set, the
response will have a boolean field `terminated_early` to indicate
whether the query execution has actually terminated_early.
diff --git a/docs/reference/search/field-stats.asciidoc b/docs/reference/search/field-stats.asciidoc
new file mode 100644
index 00000000000..7cac3e7e5c3
--- /dev/null
+++ b/docs/reference/search/field-stats.asciidoc
@@ -0,0 +1,204 @@
+[[search-field-stats]]
+== Field stats API
+
+experimental[]
+
+The field stats api allows one to find statistical properties of a field without executing a search, but by
+looking up measurements that are natively available in the Lucene index.
This can be useful to explore a dataset which
+you don't know much about. For example, this allows creating a histogram aggregation with meaningful intervals.
+
+The field stats api by default executes on all indices, but can execute on specific indices too.
+
+All indices:
+
+[source,js]
+--------------------------------------------------
+curl -XGET "http://localhost:9200/_field_stats?fields=rating"
+--------------------------------------------------
+
+Specific indices:
+
+[source,js]
+--------------------------------------------------
+curl -XGET "http://localhost:9200/index1,index2/_field_stats?fields=rating"
+--------------------------------------------------
+
+Supported request options:
+
+[horizontal]
+`fields`::
+
+A list of fields to compute stats for.
+
+`level`::
+
+Defines if field stats should be returned on a per index level or on a cluster
+wide level. Valid values are `indices` and `cluster`. Defaults to `cluster`.
+
+=== Field statistics
+
+The field stats api is supported on string based, number based and date based fields and can return the following statistics per field:
+
+[horizontal]
+`max_doc`::
+
+The total number of documents.
+
+`doc_count`::
+
+The number of documents that have at least one term for this field, or -1 if
+this measurement isn't available on one or more shards.
+
+`density`::
+
+The percentage of documents that have at least one value for this field. This
+is a derived statistic and is based on the `max_doc` and `doc_count`.
+
+`sum_doc_freq`::
+
+The sum of each term's document frequency in this field, or -1 if this
+measurement isn't available on one or more shards. Document frequency is the
+number of documents containing a particular term.
+
+`sum_total_term_freq`::
+
+The sum of the term frequencies of all terms in this field across all
+documents, or `-1` if this measurement isn't available on one or more shards.
+Term frequency is the total number of occurrences of a term in a particular
+document and field.
+
+`min_value`::
+
+The lowest value in the field represented in a displayable form.
+
+`max_value`::
+
+The highest value in the field represented in a displayable form.
+
+NOTE: For all the mentioned statistics, documents marked as deleted aren't taken into account. The documents marked
+as deleted are only taken into account when the segments these documents reside on are merged away.
+ +[float] +=== Example + +[source,js] +-------------------------------------------------- +curl -XGET "http://localhost:9200/_field_stats?fields=rating,answer_count,creation_date,display_name" +-------------------------------------------------- + +[source,js] +-------------------------------------------------- +{ + "_shards": { + "total": 1, + "successful": 1, + "failed": 0 + }, + "indices": { + "_all": { <1> + "fields": { + "creation_date": { + "max_doc": 1326564, + "doc_count": 564633, + "density": 42, + "sum_doc_freq": 2258532, + "sum_total_term_freq": -1, + "min_value": "2008-08-01T16:37:51.513Z", + "max_value": "2013-06-02T03:23:11.593Z" + }, + "display_name": { + "max_doc": 1326564, + "doc_count": 126741, + "density": 9, + "sum_doc_freq": 166535, + "sum_total_term_freq": 166616, + "min_value": "0", + "max_value": "정혜선" + }, + "answer_count": { + "max_doc": 1326564, + "doc_count": 139885, + "density": 10, + "sum_doc_freq": 559540, + "sum_total_term_freq": -1, + "min_value": 0, + "max_value": 160 + }, + "rating": { + "max_doc": 1326564, + "doc_count": 437892, + "density": 33, + "sum_doc_freq": 1751568, + "sum_total_term_freq": -1, + "min_value": -14, + "max_value": 1277 + } + } + } + } +} +-------------------------------------------------- + +<1> The `_all` key indicates that it contains the field stats of all indices in the cluster. + +With level set to `indices`: + +[source,js] +-------------------------------------------------- +curl -XGET "http://localhost:9200/_field_stats?fields=rating,answer_count,creation_date,display_name&level=indices" +-------------------------------------------------- + +[source,js] +-------------------------------------------------- +{ + "_shards": { + "total": 1, + "successful": 1, + "failed": 0 + }, + "indices": { + "stack": { <1> + "fields": { + "creation_date": { + "max_doc": 1326564, + "doc_count": 564633, + "density": 42, + "sum_doc_freq": 2258532, + "sum_total_term_freq": -1, + "min_value": "2008-08-01T16:37:51.513Z", + "max_value": "2013-06-02T03:23:11.593Z" + }, + "display_name": { + "max_doc": 1326564, + "doc_count": 126741, + "density": 9, + "sum_doc_freq": 166535, + "sum_total_term_freq": 166616, + "min_value": "0", + "max_value": "정혜선" + }, + "answer_count": { + "max_doc": 1326564, + "doc_count": 139885, + "density": 10, + "sum_doc_freq": 559540, + "sum_total_term_freq": -1, + "min_value": 0, + "max_value": 160 + }, + "rating": { + "max_doc": 1326564, + "doc_count": 437892, + "density": 33, + "sum_doc_freq": 1751568, + "sum_total_term_freq": -1, + "min_value": -14, + "max_value": 1277 + } + } + } + } +} +-------------------------------------------------- + +<1> The `stack` key means it contains all field stats for the `stack` index. \ No newline at end of file diff --git a/docs/reference/search/request-body.asciidoc b/docs/reference/search/request-body.asciidoc index 4dbb7e006dc..fadfbb191f5 100644 --- a/docs/reference/search/request-body.asciidoc +++ b/docs/reference/search/request-body.asciidoc @@ -77,7 +77,8 @@ And here is a sample response: `terminate_after`:: - experimental[] The maximum number of documents to collect for each shard, + experimental[The API for this feature may change in the future] + The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. If set, the response will have a boolean field `terminated_early` to indicate whether the query execution has actually terminated_early. 
Defaults to no diff --git a/docs/reference/search/request/scroll.asciidoc b/docs/reference/search/request/scroll.asciidoc index 8f33d1a6bd8..b96033719d2 100644 --- a/docs/reference/search/request/scroll.asciidoc +++ b/docs/reference/search/request/scroll.asciidoc @@ -49,7 +49,7 @@ curl -XGET 'localhost:9200/twitter/tweet/_search?scroll=1m' -d ' ' -------------------------------------------------- -The result from the above request includes a `scroll_id`, which should +The result from the above request includes a `_scroll_id`, which should be passed to the `scroll` API in order to retrieve the next batch of results. @@ -84,7 +84,7 @@ curl -XGET 'localhost:9200/_search/scroll?scroll=1m' -d 'c2Nhbjs2OzM0NDg1ODpzRlB -------------------------------------------------- IMPORTANT: The initial search request and each subsequent scroll request -returns a new `scroll_id` -- only the most recent `scroll_id` should be +returns a new `_scroll_id` -- only the most recent `_scroll_id` should be used. NOTE: If the request specifies aggregations, only the initial search response diff --git a/docs/reference/search/suggesters/context-suggest.asciidoc b/docs/reference/search/suggesters/context-suggest.asciidoc index bdc8157af00..c09659a43a9 100644 --- a/docs/reference/search/suggesters/context-suggest.asciidoc +++ b/docs/reference/search/suggesters/context-suggest.asciidoc @@ -106,7 +106,7 @@ or as reference to another field within the documents indexed: "context": { "color": { "type": "category", - "default": "red" + "default": "red", "path": "color_field" } } diff --git a/docs/reference/search/uri-request.asciidoc b/docs/reference/search/uri-request.asciidoc index b1509a3da68..a367dc679db 100644 --- a/docs/reference/search/uri-request.asciidoc +++ b/docs/reference/search/uri-request.asciidoc @@ -82,7 +82,8 @@ scores and return them as part of each hit. within the specified time value and bail with the hits accumulated up to that point when expired. Defaults to no timeout. -|`terminate_after` |experimental[] The maximum number of documents to collect for +|`terminate_after` |experimental[The API for this feature may change in the future] +The maximum number of documents to collect for each shard, upon reaching which the query execution will terminate early. If set, the response will have a boolean field `terminated_early` to indicate whether the query execution has actually terminated_early. diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc index b91d8ea17bb..f0d8fdff4d3 100644 --- a/docs/reference/setup.asciidoc +++ b/docs/reference/setup.asciidoc @@ -4,7 +4,7 @@ [partintro] -- This section includes information on how to setup *elasticsearch* and -get it running. If you haven't already, http://www.elasticsearch.org/download[download] it, and +get it running. If you haven't already, http://www.elastic.co/downloads[download] it, and then check the <> docs. NOTE: Elasticsearch can also be installed from our repositories using `apt` or `yum`. diff --git a/docs/reference/setup/as-a-service.asciidoc b/docs/reference/setup/as-a-service.asciidoc index c75e409c841..e325c1165f9 100644 --- a/docs/reference/setup/as-a-service.asciidoc +++ b/docs/reference/setup/as-a-service.asciidoc @@ -21,7 +21,6 @@ Each package features a configuration file, which allows you to set the followin `MAX_MAP_COUNT`:: Maximum number of memory map areas a process may have. If you use `mmapfs` as index store type, make sure this is set to a high value. 
For more information, check the https://github.com/torvalds/linux/blob/master/Documentation/sysctl/vm.txt[linux kernel documentation] about `max_map_count`. This is set via `sysctl` before starting elasticsearch. Defaults to `65535`
`LOG_DIR`:: Log directory, defaults to `/var/log/elasticsearch`
`DATA_DIR`:: Data directory, defaults to `/var/lib/elasticsearch`
-`WORK_DIR`:: Work directory, defaults to `/tmp/elasticsearch`
`CONF_DIR`:: Configuration file directory (which needs to include `elasticsearch.yml` and `logging.yml` files), defaults to `/etc/elasticsearch`
`CONF_FILE`:: Path to configuration file, defaults to `/etc/elasticsearch/elasticsearch.yml`
`ES_JAVA_OPTS`:: Any additional java options you may want to apply. This may be useful, if you need to set the `node.name` property, but do not want to change the `elasticsearch.yml` configuration file, because it is distributed via a provisioning system like puppet or chef. Example: `ES_JAVA_OPTS="-Des.node.name=search-01"`
diff --git a/docs/reference/setup/dir-layout.asciidoc b/docs/reference/setup/dir-layout.asciidoc
index 0095f60e332..19f565bec9a 100644
--- a/docs/reference/setup/dir-layout.asciidoc
+++ b/docs/reference/setup/dir-layout.asciidoc
@@ -18,30 +18,22 @@ on the node. Can hold multiple locations. | {path.home}/data| path.data
| plugins | Plugin files location. Each plugin will be contained in a subdirectory. | {path.home}/plugins | path.plugins
|=======================================================================

-The multiple data locations allows to stripe it. The striping is simple,
-placing whole files in one of the locations, and deciding where to place
-the file based on the value of the `index.store.distributor` setting:
+Multiple `data` paths may be specified, in order to spread data across
+multiple disks or locations, but all of the files from a single shard will be
+written to the same path. This can be configured as follows:

-* `least_used` (default) always selects the directory with the most
-available space +

- * `random` selects directories at random. The probability of selecting
-a particular directory is proportional to amount of available space in
-this directory. +

+---------------------------------
+path.data: /mnt/first,/mnt/second
+---------------------------------

-Note, there are no multiple copies of the same data, in that, its
-similar to RAID 0. Though simple, it should provide a good solution for
-people that don't want to mess with RAID. Here is how it is configured:
+Or in an array format:

----------------------------------
-path.data: /mnt/first,/mnt/second
----------------------------------

-Or the in an array format:

-----------------------------------------
-path.data: ["/mnt/first", "/mnt/second"]
-----------------------------------------
+----------------------------------------
+path.data: ["/mnt/first", "/mnt/second"]
+----------------------------------------

+TIP: To stripe shards across multiple disks, please use a RAID driver
+instead.
[float] [[default-paths]] diff --git a/docs/reference/setup/repositories.asciidoc b/docs/reference/setup/repositories.asciidoc index f8fe939604c..3bf693d33ea 100644 --- a/docs/reference/setup/repositories.asciidoc +++ b/docs/reference/setup/repositories.asciidoc @@ -22,14 +22,14 @@ Download and install the Public Signing Key: [source,sh] -------------------------------------------------- -wget -qO - https://packages.elasticsearch.org/GPG-KEY-elasticsearch | sudo apt-key add - +wget -qO - https://packages.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add - -------------------------------------------------- Add the repository definition to your `/etc/apt/sources.list` file: ["source","sh",subs="attributes,callouts"] -------------------------------------------------- -echo "deb http://packages.elasticsearch.org/elasticsearch/{branch}/debian stable main" | sudo tee -a /etc/apt/sources.list +echo "deb http://packages.elastic.co/elasticsearch/{branch}/debian stable main" | sudo tee -a /etc/apt/sources.list -------------------------------------------------- [WARNING] @@ -65,7 +65,7 @@ Download and install the public signing key: [source,sh] -------------------------------------------------- -rpm --import https://packages.elasticsearch.org/GPG-KEY-elasticsearch +rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch -------------------------------------------------- Add the following in your `/etc/yum.repos.d/` directory @@ -75,9 +75,9 @@ in a file with a `.repo` suffix, for example `elasticsearch.repo` -------------------------------------------------- [elasticsearch-{branch}] name=Elasticsearch repository for {branch}.x packages -baseurl=http://packages.elasticsearch.org/elasticsearch/{branch}/centos +baseurl=http://packages.elastic.co/elasticsearch/{branch}/centos gpgcheck=1 -gpgkey=http://packages.elasticsearch.org/GPG-KEY-elasticsearch +gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch enabled=1 -------------------------------------------------- diff --git a/docs/reference/setup/upgrade.asciidoc b/docs/reference/setup/upgrade.asciidoc index d2a33d9e477..9f9e745808f 100644 --- a/docs/reference/setup/upgrade.asciidoc +++ b/docs/reference/setup/upgrade.asciidoc @@ -69,7 +69,7 @@ $ curl -XPUT 'http://localhost:9200/_cluster/settings' -d '{ [float] ==== 1.0 and later -To back up a running 1.0 or later system, it is simplest to use the snapshot feature. Complete instructions for backup and restore with snapshots are available http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-snapshots.html[here]. +To back up a running 1.0 or later system, it is simplest to use the snapshot feature. See the complete instructions for <>. [float] [[rolling-upgrades]] @@ -94,14 +94,9 @@ This syntax applies to Elasticsearch 1.0 and later: * Shut down a single node within the cluster. -[source,sh] --------------------------------------------- -curl -XPOST 'http://localhost:9200/_cluster/nodes/_local/_shutdown' --------------------------------------------- - * Confirm that all shards are correctly reallocated to the remaining running nodes. -* Upgrade the stopped node. To upgrade using a zip or compressed tarball from elasticsearch.org: +* Upgrade the stopped node. To upgrade using a zip or compressed tarball from elastic.co: ** Extract the zip or tarball to a new directory, usually in the same volume as the current Elasticsearch installation. 
Do not overwrite the existing installation, as the downloaded archive will contain a default elasticsearch.yml file and will overwrite your existing configuration.
** Copy the configuration files from the old Elasticsearch installation's config directory to the new Elasticsearch installation's config directory. Move data files from the old Elasticsearch installation's data directory if necessary. If data files are not located within the tarball's extraction directory, they will not have to be moved.
** The simplest solution for moving from one version to another is to have a symbolic link for 'elasticsearch' that points to the currently running version. This link can be easily updated and will provide a stable access point to the most recent version. Update this symbolic link if it is being used.
@@ -149,11 +144,6 @@ This syntax is from versions prior to 1.0:
--------------------------------------------------

* Stop all Elasticsearch services on all nodes in the cluster.
-[source,sh]
------------------------------------------------------
- curl -XPOST 'http://localhost:9200/_shutdown'
------------------------------------------------------
-
* On the first node to be upgraded, extract the archive or install the new package as described above in the Rolling Upgrades section. Repeat for all nodes.

* After upgrading Elasticsearch on all nodes is complete, the cluster can be started by starting each node individually.
diff --git a/docs/resiliency/index.asciidoc b/docs/resiliency/index.asciidoc
index 4618a1d94b9..d52e8804392 100644
--- a/docs/resiliency/index.asciidoc
+++ b/docs/resiliency/index.asciidoc
@@ -22,10 +22,10 @@ improvements throughout this page to provide the full context.

If you’re interested in more on how we approach ensuring resiliency in
Elasticsearch, you may be interested in Igor Motov’s recent talk
-http://www.elasticsearch.org/videos/improving-elasticsearch-resiliency/[Improving Elasticsearch Resiliency].
+http://www.elastic.co/videos/improving-elasticsearch-resiliency[Improving Elasticsearch Resiliency].

You may also be interested in our blog post
-http://www.elasticsearch.org/blog/resiliency-elasticsearch/[Resiliency in Elasticsearch],
+http://www.elastic.co/blog/resiliency-elasticsearch[Resiliency in Elasticsearch],
which details our thought processes when addressing resiliency in both
Elasticsearch and the work our developers do upstream in Apache Lucene.

@@ -416,7 +416,7 @@ The Snapshot/Restore API supports a number of different repository types for sto
[float]
=== Circuit Breaker: Fielddata (STATUS: DONE, v1.0.0)

-Currently, the http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules-fielddata.html[circuit breaker] protects against loading too much field data by estimating how much memory the field data will take to load, then aborting the request if the memory requirements are too high. This feature was added in Elasticsearch version 1.0.0.
+Currently, the https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules-fielddata.html[circuit breaker] protects against loading too much field data by estimating how much memory the field data will take to load, then aborting the request if the memory requirements are too high. This feature was added in Elasticsearch version 1.0.0.
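+
+As a rough illustration of how this breaker is tuned (the `indices.breaker.fielddata.limit` setting name applies to
+recent releases, and the `60%` value is only an example, not a recommendation), the threshold can be adjusted
+dynamically through the cluster settings API:
+
+[source,sh]
+--------------------------------------------------
+curl -XPUT 'http://localhost:9200/_cluster/settings' -d '{
+    "persistent" : {
+        "indices.breaker.fielddata.limit" : "60%"
+    }
+}'
+--------------------------------------------------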
[float] === Use of Paginated Data Structures to Ease Garbage Collection (STATUS: DONE, v1.0.0 & v1.2.0) diff --git a/pom.xml b/pom.xml index 772d7ef6578..432aa22744e 100644 --- a/pom.xml +++ b/pom.xml @@ -32,7 +32,7 @@ 5.2.0 - 1675100 + 1677039 5.2.0-snapshot-${lucene.snapshot.revision} 2.1.14 auto @@ -47,6 +47,7 @@ 512m ${basedir}/logs/ 5 + ${java.home}${file.separator}bin${file.separator}java .local-${project.version}-execution-hints.log @@ -56,11 +57,11 @@ /var/lib/elasticsearch elasticsearch elasticsearch - /tmp/elasticsearch /var/log/elasticsearch ${packaging.elasticsearch.home.dir}/plugins /var/run/elasticsearch - + false + dpkg-sig @@ -72,7 +73,7 @@ lucene-snapshots Lucene Snapshots - https://download.elastic.co/lucenesnapshots/${lucene.snapshot.revision} + http://download.elastic.co/lucenesnapshots/${lucene.snapshot.revision} @@ -266,6 +267,12 @@ jackson-dataformat-yaml 2.5.1 compile + + + com.fasterxml.jackson.core + jackson-databind + + @@ -437,15 +444,15 @@ 1.4.0 - exec exec - java + ${jvm.executable} + -Des.security.manager.enabled=false -classpath org.elasticsearch.bootstrap.Bootstrap @@ -516,6 +523,7 @@ junit4 + ${jvm.executable} 10 pipe,warn true @@ -628,7 +636,7 @@ ${tests.compatibility} true - ${basedir}/dev-tools/tests.policy + ${basedir}/src/main/resources/org/elasticsearch/bootstrap/security.policy @@ -1080,6 +1088,11 @@ jdeb + ${deb.sign} + ${gpg.keyring} + ${gpg.key} + ${gpg.passphrase} + ${deb.sign.method} @@ -1208,7 +1221,6 @@ ${packaging.elasticsearch.data.dir} ${packaging.elasticsearch.log.dir} - ${packaging.elasticsearch.work.dir} ${packaging.elasticsearch.plugins.dir} ${packaging.elasticsearch.pid.dir} @@ -1245,6 +1257,11 @@ 755 root root + ${gpg.key} + ${gpg.keyring} + + ${gpg.passphrase} + @@ -1406,12 +1423,6 @@ ${packaging.elasticsearch.user} ${packaging.elasticsearch.group} - - ${packaging.elasticsearch.work.dir} - 755 - ${packaging.elasticsearch.user} - ${packaging.elasticsearch.group} - ${packaging.elasticsearch.plugins.dir} 755 @@ -1621,7 +1632,7 @@ - + diff --git a/rest-api-spec/api/count.json b/rest-api-spec/api/count.json index 998667c2ef2..c25997633b2 100644 --- a/rest-api-spec/api/count.json +++ b/rest-api-spec/api/count.json @@ -41,10 +41,6 @@ "routing": { "type" : "string", "description" : "Specific routing value" - }, - "source": { - "type" : "string", - "description" : "The URL-encoded query definition (instead of using the request body)" } } }, diff --git a/rest-api-spec/api/count_percolate.json b/rest-api-spec/api/count_percolate.json index 57d19ae44fe..584f33685d3 100644 --- a/rest-api-spec/api/count_percolate.json +++ b/rest-api-spec/api/count_percolate.json @@ -23,10 +23,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "routing": { "type": "list", "description": "A comma-separated list of specific routing values" diff --git a/rest-api-spec/api/delete_by_query.json b/rest-api-spec/api/delete_by_query.json deleted file mode 100644 index a91b7be2c95..00000000000 --- a/rest-api-spec/api/delete_by_query.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "delete_by_query": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/docs-delete-by-query.html", - "methods": ["DELETE"], - "url": { - "path": "/{index}/_query", - "paths": ["/{index}/_query", "/{index}/{type}/_query"], - "parts": { - "index": { - "type" : "list", - "required": true, - "description" : "A comma-separated list of indices to restrict the operation; use `_all` to perform the operation on all 
indices" - }, - "type": { - "type" : "list", - "description" : "A comma-separated list of types to restrict the operation" - } - }, - "params": { - "analyzer": { - "type" : "string", - "description" : "The analyzer to use for the query string" - }, - "consistency": { - "type" : "enum", - "options" : ["one", "quorum", "all"], - "description" : "Specific write consistency setting for the operation" - }, - "default_operator": { - "type" : "enum", - "options" : ["AND","OR"], - "default" : "OR", - "description" : "The default operator for query string query (AND or OR)" - }, - "df": { - "type" : "string", - "description" : "The field to use as default where no field prefix is given in the query string" - }, - "ignore_unavailable": { - "type" : "boolean", - "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" - }, - "allow_no_indices": { - "type" : "boolean", - "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" - }, - "expand_wildcards": { - "type" : "enum", - "options" : ["open","closed","none","all"], - "default" : "open", - "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." - }, - "q": { - "type" : "string", - "description" : "Query in the Lucene query string syntax" - }, - "routing": { - "type" : "string", - "description" : "Specific routing value" - }, - "source": { - "type" : "string", - "description" : "The URL-encoded query definition (instead of using the request body)" - }, - "timeout": { - "type" : "time", - "description" : "Explicit operation timeout" - } - } - }, - "body": { - "description" : "A query to restrict the operation specified with the Query DSL" - } - } -} diff --git a/rest-api-spec/api/explain.json b/rest-api-spec/api/explain.json index 6e766823ad8..30b5deff1d3 100644 --- a/rest-api-spec/api/explain.json +++ b/rest-api-spec/api/explain.json @@ -69,10 +69,6 @@ "type" : "string", "description" : "Specific routing value" }, - "source": { - "type" : "string", - "description" : "The URL-encoded query definition (instead of using the request body)" - }, "_source": { "type" : "list", "description" : "True or false to return the _source field or not, or a list of fields to return" diff --git a/rest-api-spec/api/field_stats.json b/rest-api-spec/api/field_stats.json new file mode 100644 index 00000000000..e3c5e6d45df --- /dev/null +++ b/rest-api-spec/api/field_stats.json @@ -0,0 +1,46 @@ +{ + "field_stats": { + "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/search-field-stats.html", + "methods": ["GET", "POST"], + "url": { + "path": "/_field_stats", + "paths": [ + "/_field_stats", + "/{index}/_field_stats" + ], + "parts": { + "index": { + "type" : "list", + "description" : "A comma-separated list of index names; use `_all` or empty string to perform the operation on all indices" + } + }, + "params": { + "fields": { + "type" : "list", + "description" : "A comma-separated list of fields for to get field statistics for (min value, max value, and more)" + }, + "level": { + "type" : "enum", + "options" : ["indices", "cluster"], + "default" : "cluster", + "description" : "Defines if field stats should be returned on a per index level or on a cluster wide level" + }, + "ignore_unavailable": { + "type" : "boolean", + "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" + }, 
+ "allow_no_indices": { + "type" : "boolean", + "description" : "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)" + }, + "expand_wildcards": { + "type" : "enum", + "options" : ["open","closed","none","all"], + "default" : "open", + "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both." + } + } + }, + "body": null + } +} diff --git a/rest-api-spec/api/indices.analyze.json b/rest-api-spec/api/indices.analyze.json index d1d87c5a867..372693b794a 100644 --- a/rest-api-spec/api/indices.analyze.json +++ b/rest-api-spec/api/indices.analyze.json @@ -12,10 +12,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "analyzer": { "type" : "string", "description" : "The name of the analyzer to use" diff --git a/rest-api-spec/api/indices.clear_cache.json b/rest-api-spec/api/indices.clear_cache.json index 9fd73acbd01..c8e3e84de88 100644 --- a/rest-api-spec/api/indices.clear_cache.json +++ b/rest-api-spec/api/indices.clear_cache.json @@ -32,10 +32,6 @@ "type" : "boolean", "description" : "Clear filter caches" }, - "filter_keys": { - "type" : "boolean", - "description" : "A comma-separated list of keys to clear when using the `filter_cache` parameter (default: all)" - }, "id": { "type" : "boolean", "description" : "Clear ID caches for parent/child" diff --git a/rest-api-spec/api/indices.validate_query.json b/rest-api-spec/api/indices.validate_query.json index 2b93c241215..c96cd109b3d 100644 --- a/rest-api-spec/api/indices.validate_query.json +++ b/rest-api-spec/api/indices.validate_query.json @@ -37,10 +37,6 @@ "operation_threading": { "description" : "TODO: ?" 
}, - "source": { - "type" : "string", - "description" : "The URL-encoded query definition (instead of using the request body)" - }, "q": { "type" : "string", "description" : "Query in the Lucene query string syntax" diff --git a/rest-api-spec/api/mget.json b/rest-api-spec/api/mget.json index 38d8ce20420..1639f3619b3 100644 --- a/rest-api-spec/api/mget.json +++ b/rest-api-spec/api/mget.json @@ -16,10 +16,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "fields": { "type": "list", "description" : "A comma-separated list of fields to return in the response" diff --git a/rest-api-spec/api/mlt.json b/rest-api-spec/api/mlt.json index b5b405912d4..0dc58782dff 100644 --- a/rest-api-spec/api/mlt.json +++ b/rest-api-spec/api/mlt.json @@ -23,10 +23,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "boost_terms": { "type" : "number", "description" : "The boost factor" diff --git a/rest-api-spec/api/mpercolate.json b/rest-api-spec/api/mpercolate.json index 69c92511ed7..7cbf4f61e43 100644 --- a/rest-api-spec/api/mpercolate.json +++ b/rest-api-spec/api/mpercolate.json @@ -16,10 +16,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "ignore_unavailable": { "type": "boolean", "description": "Whether specified concrete indices should be ignored when unavailable (missing or closed)" diff --git a/rest-api-spec/api/msearch.json b/rest-api-spec/api/msearch.json index f233b304492..3d8297e496d 100644 --- a/rest-api-spec/api/msearch.json +++ b/rest-api-spec/api/msearch.json @@ -16,10 +16,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "search_type": { "type" : "enum", "options" : ["query_then_fetch", "query_and_fetch", "dfs_query_then_fetch", "dfs_query_and_fetch", "count", "scan"], diff --git a/rest-api-spec/api/mtermvectors.json b/rest-api-spec/api/mtermvectors.json index b40ac72860b..58978b7d190 100644 --- a/rest-api-spec/api/mtermvectors.json +++ b/rest-api-spec/api/mtermvectors.json @@ -16,10 +16,6 @@ } }, "params" : { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "ids" : { "type" : "list", "description" : "A comma-separated list of documents ids. 
You must define ids as parameter or set \"ids\" or \"docs\" in the request body", @@ -97,4 +93,4 @@ } } -} \ No newline at end of file +} diff --git a/rest-api-spec/api/nodes.shutdown.json b/rest-api-spec/api/nodes.shutdown.json deleted file mode 100644 index 6c8bc42f0f4..00000000000 --- a/rest-api-spec/api/nodes.shutdown.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "nodes.shutdown": { - "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/cluster-nodes-shutdown.html", - "methods": ["POST"], - "url": { - "path": "/_shutdown", - "paths": ["/_shutdown", "/_cluster/nodes/_shutdown", "/_cluster/nodes/{node_id}/_shutdown"], - "parts": { - "node_id": { - "type" : "list", - "description" : "A comma-separated list of node IDs or names to perform the operation on; use `_local` to perform the operation on the node you're connected to, leave empty to perform the operation on all nodes" - } - }, - "params": { - "delay": { - "type" : "time", - "description" : "Set the delay for the operation (default: 1s)" - }, - "exit": { - "type" : "boolean", - "description" : "Exit the JVM as well (default: true)" - } - } - }, - "body": null - } -} diff --git a/rest-api-spec/api/percolate.json b/rest-api-spec/api/percolate.json index 3ea1dca776e..e58655dea5a 100644 --- a/rest-api-spec/api/percolate.json +++ b/rest-api-spec/api/percolate.json @@ -23,10 +23,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "routing": { "type" : "list", "description" : "A comma-separated list of specific routing values" diff --git a/rest-api-spec/api/scroll.json b/rest-api-spec/api/scroll.json index 50cee5fbc2a..885b746d095 100644 --- a/rest-api-spec/api/scroll.json +++ b/rest-api-spec/api/scroll.json @@ -12,10 +12,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "scroll": { "type" : "duration", "description" : "Specify how long a consistent view of the index should be maintained for scrolled search" diff --git a/rest-api-spec/api/search.json b/rest-api-spec/api/search.json index 2a6ef4cc481..1f26c3e6a89 100644 --- a/rest-api-spec/api/search.json +++ b/rest-api-spec/api/search.json @@ -101,10 +101,6 @@ "type" : "list", "description" : "A comma-separated list of : pairs" }, - "source": { - "type" : "string", - "description" : "The URL-encoded request definition using the Query DSL (instead of using request body)" - }, "_source": { "type" : "list", "description" : "True or false to return the _source field or not, or a list of fields to return" diff --git a/rest-api-spec/api/search_exists.json b/rest-api-spec/api/search_exists.json index 10ac51eddfb..4f52b272673 100644 --- a/rest-api-spec/api/search_exists.json +++ b/rest-api-spec/api/search_exists.json @@ -41,10 +41,6 @@ "routing": { "type" : "string", "description" : "Specific routing value" - }, - "source": { - "type" : "string", - "description" : "The URL-encoded query definition (instead of using the request body)" } } }, diff --git a/rest-api-spec/api/search_template.json b/rest-api-spec/api/search_template.json index a210f3aeb33..a1122f19a1e 100644 --- a/rest-api-spec/api/search_template.json +++ b/rest-api-spec/api/search_template.json @@ -16,10 +16,6 @@ } }, "params" : { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "ignore_unavailable": { "type" : "boolean", "description" : "Whether specified concrete indices should be ignored when unavailable (missing or closed)" diff --git 
a/rest-api-spec/api/suggest.json b/rest-api-spec/api/suggest.json index 974ae158077..ca0ae8b4f3c 100644 --- a/rest-api-spec/api/suggest.json +++ b/rest-api-spec/api/suggest.json @@ -33,10 +33,6 @@ "routing": { "type" : "string", "description" : "Specific routing value" - }, - "source": { - "type" : "string", - "description" : "The URL-encoded request definition (instead of using request body)" } } }, diff --git a/rest-api-spec/api/termvectors.json b/rest-api-spec/api/termvectors.json index b0f5af89e94..147d7971c9c 100644 --- a/rest-api-spec/api/termvectors.json +++ b/rest-api-spec/api/termvectors.json @@ -22,10 +22,6 @@ } }, "params": { - "source": { - "type" : "string", - "description" : "The URL-encoded request definition" - }, "term_statistics" : { "type" : "boolean", "description" : "Specifies if total term frequency and document frequency should be returned.", diff --git a/rest-api-spec/test/cluster.put_settings/10_basic.yaml b/rest-api-spec/test/cluster.put_settings/10_basic.yaml index bb1256efecd..9955f4519d6 100644 --- a/rest-api-spec/test/cluster.put_settings/10_basic.yaml +++ b/rest-api-spec/test/cluster.put_settings/10_basic.yaml @@ -1,7 +1,7 @@ --- setup: - skip: - version: " - " + version: "all" reason: leaves transient metadata behind, need to fix it --- "Test put settings": diff --git a/rest-api-spec/test/create/50_parent.yaml b/rest-api-spec/test/create/50_parent.yaml index dcd24d99346..6fe64b7bbed 100644 --- a/rest-api-spec/test/create/50_parent.yaml +++ b/rest-api-spec/test/create/50_parent.yaml @@ -14,7 +14,7 @@ wait_for_status: yellow - do: - catch: /RoutingMissingException/ + catch: /routing_missing_exception/ create: index: test_1 type: test diff --git a/rest-api-spec/test/create/75_ttl.yaml b/rest-api-spec/test/create/75_ttl.yaml index 099ed279020..3354e3b0517 100644 --- a/rest-api-spec/test/create/75_ttl.yaml +++ b/rest-api-spec/test/create/75_ttl.yaml @@ -89,7 +89,7 @@ type: test id: 1 - do: - catch: /AlreadyExpiredException/ + catch: /already_expired_exception/ create: index: test_1 type: test diff --git a/rest-api-spec/test/delete/42_missing_parent.yml b/rest-api-spec/test/delete/42_missing_parent.yml index 8247f8885e9..d72c5a83d59 100644 --- a/rest-api-spec/test/delete/42_missing_parent.yml +++ b/rest-api-spec/test/delete/42_missing_parent.yml @@ -21,7 +21,7 @@ body: { foo: bar } - do: - catch: /RoutingMissingException/ + catch: /routing_missing_exception/ delete: index: test_1 type: test diff --git a/rest-api-spec/test/delete_by_query/10_basic.yaml b/rest-api-spec/test/delete_by_query/10_basic.yaml deleted file mode 100644 index c253ad8d276..00000000000 --- a/rest-api-spec/test/delete_by_query/10_basic.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -"Basic delete_by_query": - - do: - index: - index: test_1 - type: test - id: 1 - body: { foo: bar } - - - do: - index: - index: test_1 - type: test - id: 2 - body: { foo: baz } - - - do: - index: - index: test_1 - type: test - id: 3 - body: { foo: foo } - - - do: - indices.refresh: {} - - - do: - delete_by_query: - index: test_1 - body: - query: - match: - foo: bar - - - do: - indices.refresh: {} - - - do: - count: - index: test_1 - - - match: { count: 2 } diff --git a/rest-api-spec/test/field_stats/10_basics.yaml b/rest-api-spec/test/field_stats/10_basics.yaml new file mode 100644 index 00000000000..61a575603da --- /dev/null +++ b/rest-api-spec/test/field_stats/10_basics.yaml @@ -0,0 +1,52 @@ +--- +"Basic field stats": + - do: + index: + index: test_1 + type: test + id: id_1 + body: { foo: "bar", number: 123 } + + - do: 
+ indices.refresh: {} + + - do: + field_stats: + index: test_1 + fields: [foo, number] + + - match: { indices._all.fields.foo.max_doc: 1 } + - match: { indices._all.fields.foo.doc_count: 1 } + - match: { indices._all.fields.foo.min_value: "bar" } + - match: { indices._all.fields.foo.max_value: "bar" } + - match: { indices._all.fields.number.max_doc: 1 } + - match: { indices._all.fields.number.doc_count: 1 } + - match: { indices._all.fields.number.min_value: 123 } + - match: { indices._all.fields.number.max_value: 123 } + +--- +"Basic field stats with level set to indices": + - do: + index: + index: test_1 + type: test + id: id_1 + body: { foo: "bar", number: 123 } + + - do: + indices.refresh: {} + + - do: + field_stats: + index: test_1 + fields: [foo, number] + level: indices + + - match: { indices.test_1.fields.foo.max_doc: 1 } + - match: { indices.test_1.fields.foo.doc_count: 1 } + - match: { indices.test_1.fields.foo.min_value: "bar" } + - match: { indices.test_1.fields.foo.max_value: "bar" } + - match: { indices.test_1.fields.number.max_doc: 1 } + - match: { indices.test_1.fields.number.doc_count: 1 } + - match: { indices.test_1.fields.number.min_value: 123 } + - match: { indices.test_1.fields.number.max_value: 123 } diff --git a/rest-api-spec/test/index/50_parent.yaml b/rest-api-spec/test/index/50_parent.yaml index 551d30d95d7..28ab61cb49b 100644 --- a/rest-api-spec/test/index/50_parent.yaml +++ b/rest-api-spec/test/index/50_parent.yaml @@ -13,7 +13,7 @@ wait_for_status: yellow - do: - catch: /RoutingMissingException/ + catch: /routing_missing_exception/ index: index: test_1 type: test diff --git a/rest-api-spec/test/index/75_ttl.yaml b/rest-api-spec/test/index/75_ttl.yaml index 1d73d4ed40f..554933654fb 100644 --- a/rest-api-spec/test/index/75_ttl.yaml +++ b/rest-api-spec/test/index/75_ttl.yaml @@ -74,7 +74,7 @@ # with timestamp - do: - catch: /AlreadyExpiredException/ + catch: /already_expired_exception/ index: index: test_1 type: test diff --git a/rest-api-spec/test/indices.exists/20_read_only_index.yaml b/rest-api-spec/test/indices.exists/20_read_only_index.yaml new file mode 100644 index 00000000000..24d2dcdc08f --- /dev/null +++ b/rest-api-spec/test/indices.exists/20_read_only_index.yaml @@ -0,0 +1,30 @@ +--- +"Test indices.exists on a read only index": + + - do: + indices.create: + index: test_index_ro + + - do: + indices.put_settings: + index: test_index_ro + body: + index.blocks.read_only: true + + - do: + indices.exists: + index: test_index_ro + + - is_true: '' + + - do: + indices.put_settings: + index: test_index_ro + body: + index.blocks.read_only: false + + - do: + indices.exists: + index: test_index_ro + + - is_true: '' diff --git a/rest-api-spec/test/indices.put_settings/all_path_options.yaml b/rest-api-spec/test/indices.put_settings/all_path_options.yaml index bd64d57ff17..07f1956f0fc 100644 --- a/rest-api-spec/test/indices.put_settings/all_path_options.yaml +++ b/rest-api-spec/test/indices.put_settings/all_path_options.yaml @@ -81,7 +81,7 @@ setup: --- "put settings in list of indices": - skip: - version: " - " + version: "all" reason: list of indices not implemented yet - do: indices.put_settings: diff --git a/rest-api-spec/test/indices.stats/12_level.yaml b/rest-api-spec/test/indices.stats/12_level.yaml index c766f5eb625..fb71e8d2032 100644 --- a/rest-api-spec/test/indices.stats/12_level.yaml +++ b/rest-api-spec/test/indices.stats/12_level.yaml @@ -15,6 +15,10 @@ setup: id: 1 body: { "foo": "baz" } + - do: + cluster.health: + wait_for_status: yellow + --- "Level - 
blank": - do: diff --git a/rest-api-spec/test/mget/13_missing_metadata.yaml b/rest-api-spec/test/mget/13_missing_metadata.yaml index 11b4a129406..8d986a330bf 100644 --- a/rest-api-spec/test/mget/13_missing_metadata.yaml +++ b/rest-api-spec/test/mget/13_missing_metadata.yaml @@ -13,27 +13,27 @@ wait_for_status: yellow - do: - catch: /ActionRequestValidationException.+ id is missing/ + catch: /action_request_validation_exception.+ id is missing/ mget: body: docs: - { _index: test_1, _type: test} - do: - catch: /ActionRequestValidationException.+ index is missing/ + catch: /action_request_validation_exception.+ index is missing/ mget: body: docs: - { _type: test, _id: 1} - do: - catch: /ActionRequestValidationException.+ no documents to get/ + catch: /action_request_validation_exception.+ no documents to get/ mget: body: docs: [] - do: - catch: /ActionRequestValidationException.+ no documents to get/ + catch: /action_request_validation_exception.+ no documents to get/ mget: body: {} diff --git a/rest-api-spec/test/mget/15_ids.yaml b/rest-api-spec/test/mget/15_ids.yaml index a86fc2cdc6e..cdd6c5724fe 100644 --- a/rest-api-spec/test/mget/15_ids.yaml +++ b/rest-api-spec/test/mget/15_ids.yaml @@ -59,14 +59,14 @@ - do: - catch: /ActionRequestValidationException.+ no documents to get/ + catch: /action_request_validation_exception.+ no documents to get/ mget: index: test_1 body: ids: [] - do: - catch: /ActionRequestValidationException.+ no documents to get/ + catch: /action_request_validation_exception.+ no documents to get/ mget: index: test_1 body: {} diff --git a/rest-api-spec/test/mpercolate/10_basic.yaml b/rest-api-spec/test/mpercolate/10_basic.yaml index 70118c93da1..9f949c21cd6 100644 --- a/rest-api-spec/test/mpercolate/10_basic.yaml +++ b/rest-api-spec/test/mpercolate/10_basic.yaml @@ -37,5 +37,5 @@ foo: bar - match: { responses.0.total: 1 } - - match: { responses.1.error: "IndexMissingException[[percolator_index1] missing]" } + - match: { responses.1.error: "/IndexMissingException.no.such.index./" } - match: { responses.2.total: 1 } diff --git a/rest-api-spec/test/msearch/10_basic.yaml b/rest-api-spec/test/msearch/10_basic.yaml index c0786229926..a028853429e 100644 --- a/rest-api-spec/test/msearch/10_basic.yaml +++ b/rest-api-spec/test/msearch/10_basic.yaml @@ -39,7 +39,7 @@ match: {foo: bar} - match: { responses.0.hits.total: 3 } - - match: { responses.1.error: "IndexMissingException[[test_2] missing]" } + - match: { responses.1.error: "/IndexMissingException.no.such.index./" } - match: { responses.2.hits.total: 1 } diff --git a/rest-api-spec/test/script/10_basic.yaml b/rest-api-spec/test/script/10_basic.yaml index 822d8879771..ee977429b59 100644 --- a/rest-api-spec/test/script/10_basic.yaml +++ b/rest-api-spec/test/script/10_basic.yaml @@ -60,7 +60,7 @@ - do: - catch: /ElasticsearchIllegalArgumentException.Unable.to.parse.*/ + catch: /Unable.to.parse.*/ put_script: id: "1" lang: "groovy" @@ -74,7 +74,7 @@ body: { "script" : "_score * doc[\"myParent.weight\"].value" } - do: - catch: /ElasticsearchIllegalArgumentException.script_lang.not.supported/ + catch: /script_lang.not.supported/ put_script: id: "1" lang: "foobar" diff --git a/rest-api-spec/test/template/10_basic.yaml b/rest-api-spec/test/template/10_basic.yaml index 838a21d5a56..bd1fd436648 100644 --- a/rest-api-spec/test/template/10_basic.yaml +++ b/rest-api-spec/test/template/10_basic.yaml @@ -50,7 +50,7 @@ body: { "template": { "query": { "match{{}}_all": {}}, "size": "{{my_size}}" } } - do: - catch: 
/ElasticsearchIllegalArgumentException\SUnable\sto\sparse.*/ + catch: /Unable\sto\sparse.*/ put_template: id: "1" body: { "template": { "query": { "match{{}}_all": {}}, "size": "{{my_size}}" } } diff --git a/rest-api-spec/test/template/20_search.yaml b/rest-api-spec/test/template/20_search.yaml index 55f886c6412..d8e7364d545 100644 --- a/rest-api-spec/test/template/20_search.yaml +++ b/rest-api-spec/test/template/20_search.yaml @@ -37,7 +37,7 @@ - match: { hits.total: 1 } - do: - catch: /ElasticsearchIllegalArgumentException.Unable.to.find.on.disk.script.simple1/ + catch: /Unable.to.find.on.disk.script.simple1/ search_template: body: { "template" : "simple1" } diff --git a/rest-api-spec/test/update/50_parent.yaml b/rest-api-spec/test/update/50_parent.yaml index 3d15ea9f2a8..bc64665e919 100644 --- a/rest-api-spec/test/update/50_parent.yaml +++ b/rest-api-spec/test/update/50_parent.yaml @@ -15,7 +15,7 @@ setup: "Parent": - do: - catch: /RoutingMissingException/ + catch: /routing_missing_exception/ update: index: test_1 type: test diff --git a/rest-api-spec/test/update/75_ttl.yaml b/rest-api-spec/test/update/75_ttl.yaml index f6b05b9eca2..8072c4d400f 100644 --- a/rest-api-spec/test/update/75_ttl.yaml +++ b/rest-api-spec/test/update/75_ttl.yaml @@ -81,7 +81,7 @@ # with timestamp - do: - catch: /AlreadyExpiredException/ + catch: /already_expired_exception/ index: index: test_1 type: test diff --git a/rest-api-spec/test/update/85_fields_meta.yaml b/rest-api-spec/test/update/85_fields_meta.yaml index ab38d5c1315..d10ac83f46b 100644 --- a/rest-api-spec/test/update/85_fields_meta.yaml +++ b/rest-api-spec/test/update/85_fields_meta.yaml @@ -2,7 +2,7 @@ "Metadata Fields": - skip: - version: " - " + version: "all" reason: "Update doesn't return metadata fields, waiting for #3259" - do: diff --git a/src/main/java/org/apache/lucene/analysis/PrefixAnalyzer.java b/src/main/java/org/apache/lucene/analysis/PrefixAnalyzer.java index 8a8f1fce31d..0b7c433da7f 100644 --- a/src/main/java/org/apache/lucene/analysis/PrefixAnalyzer.java +++ b/src/main/java/org/apache/lucene/analysis/PrefixAnalyzer.java @@ -21,7 +21,6 @@ package org.apache.lucene.analysis; import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import java.io.IOException; import java.util.Collections; @@ -97,7 +96,7 @@ public class PrefixAnalyzer extends Analyzer { this.currentPrefix = null; this.separator = separator; if (prefixes == null || !prefixes.iterator().hasNext()) { - throw new ElasticsearchIllegalArgumentException("one or more prefixes needed"); + throw new IllegalArgumentException("one or more prefixes needed"); } } diff --git a/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java b/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java index c87f9144709..d55374cd5b9 100644 --- a/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java +++ b/src/main/java/org/apache/lucene/queryparser/classic/MapperQueryParser.java @@ -39,6 +39,7 @@ import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.core.DateFieldMapper; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.support.QueryParsers; @@ -131,9 +132,6 
@@ public class MapperQueryParser extends QueryParser { setFuzzyMinSim(settings.fuzzyMinSim()); setFuzzyPrefixLength(settings.fuzzyPrefixLength()); setLocale(settings.locale()); - if (settings.timeZone() != null) { - setTimeZone(settings.timeZone().toTimeZone()); - } this.analyzeWildcard = settings.analyzeWildcard(); } @@ -377,7 +375,14 @@ public class MapperQueryParser extends QueryParser { } try { - return currentMapper.rangeQuery(part1, part2, startInclusive, endInclusive, parseContext); + Query rangeQuery; + if (currentMapper instanceof DateFieldMapper && settings.timeZone() != null) { + DateFieldMapper dateFieldMapper = (DateFieldMapper) this.currentMapper; + rangeQuery = dateFieldMapper.rangeQuery(part1, part2, startInclusive, endInclusive, settings.timeZone(), null, parseContext); + } else { + rangeQuery = currentMapper.rangeQuery(part1, part2, startInclusive, endInclusive, parseContext); + } + return rangeQuery; } catch (RuntimeException e) { if (settings.lenient()) { return null; diff --git a/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java b/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java index 7528206f6ae..936fe490a5d 100644 --- a/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java +++ b/src/main/java/org/apache/lucene/search/postingshighlight/CustomPostingsHighlighter.java @@ -18,9 +18,9 @@ package org.apache.lucene.search.postingshighlight; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexReaderContext; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Strings; @@ -91,8 +91,7 @@ public final class CustomPostingsHighlighter extends XPostingsHighlighter { /* Our own api to highlight a single document field, passing in the query terms, and get back our own Snippet object */ - public Snippet[] highlightDoc(String field, BytesRef[] terms, IndexSearcher searcher, int docId, int maxPassages) throws IOException { - IndexReader reader = searcher.getIndexReader(); + public Snippet[] highlightDoc(String field, BytesRef[] terms, IndexReader reader, int docId, int maxPassages) throws IOException { IndexReaderContext readerContext = reader.getContext(); List leaves = readerContext.leaves(); diff --git a/src/main/java/org/apache/lucene/store/StoreRateLimiting.java b/src/main/java/org/apache/lucene/store/StoreRateLimiting.java index ae021b07b09..e50c92065e2 100644 --- a/src/main/java/org/apache/lucene/store/StoreRateLimiting.java +++ b/src/main/java/org/apache/lucene/store/StoreRateLimiting.java @@ -19,7 +19,6 @@ package org.apache.lucene.store; import org.apache.lucene.store.RateLimiter.SimpleRateLimiter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.unit.ByteSizeValue; @@ -42,7 +41,7 @@ public class StoreRateLimiting { MERGE, ALL; - public static Type fromString(String type) throws ElasticsearchIllegalArgumentException { + public static Type fromString(String type) { if ("none".equalsIgnoreCase(type)) { return NONE; } else if ("merge".equalsIgnoreCase(type)) { @@ -50,7 +49,7 @@ public class StoreRateLimiting { } else if ("all".equalsIgnoreCase(type)) { return ALL; } - throw new ElasticsearchIllegalArgumentException("rate limiting type [" + type + "] not valid, can be one of 
[all|merge|none]"); + throw new IllegalArgumentException("rate limiting type [" + type + "] not valid, can be one of [all|merge|none]"); } } @@ -88,7 +87,7 @@ public class StoreRateLimiting { this.type = type; } - public void setType(String type) throws ElasticsearchIllegalArgumentException { + public void setType(String type) { this.type = Type.fromString(type); } } diff --git a/src/main/java/org/elasticsearch/ElasticsearchException.java b/src/main/java/org/elasticsearch/ElasticsearchException.java index fd7c9186875..a622b0f7e81 100644 --- a/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -22,18 +22,23 @@ package org.elasticsearch; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.rest.HasRestHeaders; import org.elasticsearch.rest.RestStatus; +import java.io.IOException; import java.util.List; import java.util.Map; /** * A base class for all elasticsearch exceptions. */ -public class ElasticsearchException extends RuntimeException { +public class ElasticsearchException extends RuntimeException implements ToXContent { + + public static final String REST_EXCEPTION_SKIP_CAUSE = "rest.exception.skip_cause"; /** * Construct a ElasticsearchException with the specified detail message. @@ -62,12 +67,8 @@ public class ElasticsearchException extends RuntimeException { Throwable cause = unwrapCause(); if (cause == this) { return RestStatus.INTERNAL_SERVER_ERROR; - } else if (cause instanceof ElasticsearchException) { - return ((ElasticsearchException) cause).status(); - } else if (cause instanceof IllegalArgumentException) { - return RestStatus.BAD_REQUEST; } else { - return RestStatus.INTERNAL_SERVER_ERROR; + return ExceptionsHelper.status(cause); } } @@ -114,19 +115,6 @@ public class ElasticsearchException extends RuntimeException { return rootCause; } - /** - * Retrieve the most specific cause of this exception, that is, - * either the innermost cause (root cause) or this exception itself. - *
<p/>Differs from {@link #getRootCause()} in that it falls back - * to the present exception if there is no root cause. - * - * @return the most specific cause (never null) - */ - public Throwable getMostSpecificCause() { - Throwable rootCause = getRootCause(); - return (rootCause != null ? rootCause : this); - } - /** * Check whether this exception contains an exception of the given type: * either it is of the given class itself or it contains a nested cause @@ -175,21 +163,6 @@ this.headers = headers(headers); } - public WithRestHeaders(String msg, @Nullable ImmutableMap<String, List<String>> headers) { - super(msg); - this.headers = headers != null ? headers : ImmutableMap.<String, List<String>>of(); - } - - public WithRestHeaders(String msg, Throwable cause, Tuple<String, String>... headers) { - super(msg, cause); - this.headers = headers(headers); - } - - public WithRestHeaders(String msg, Throwable cause, @Nullable ImmutableMap<String, List<String>> headers) { - super(msg, cause); - this.headers = headers != null ? headers : ImmutableMap.<String, List<String>>of(); - } - @Override public ImmutableMap<String, List<String>> getHeaders() { return headers; @@ -215,4 +188,106 @@ return ImmutableMap.copyOf(map); } } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + if (this instanceof ElasticsearchWrapperException) { + toXContent(builder, params, this); + } else { + builder.field("type", getExceptionName()); + builder.field("reason", getMessage()); + innerToXContent(builder, params); + } + return builder; + } + + /** + * Renders additional per exception information into the xcontent + */ + protected void innerToXContent(XContentBuilder builder, Params params) throws IOException { + causeToXContent(builder, params); + } + + /** + * Renders a cause exception as xcontent + */ + protected final void causeToXContent(XContentBuilder builder, Params params) throws IOException { + final Throwable cause = getCause(); + if (cause != null && params.paramAsBoolean(REST_EXCEPTION_SKIP_CAUSE, false) == false) { + builder.field("caused_by"); + builder.startObject(); + toXContent(builder, params, cause); + builder.endObject(); + } + } + + /** + * Static toXContent helper method that also renders non {@link org.elasticsearch.ElasticsearchException} instances as XContent. + */ + public static void toXContent(XContentBuilder builder, Params params, Throwable ex) throws IOException { + ex = ExceptionsHelper.unwrapCause(ex); + if (ex instanceof ElasticsearchException) { + ((ElasticsearchException) ex).toXContent(builder, params); + } else { + builder.field("type", getExceptionName(ex)); + builder.field("reason", ex.getMessage()); + if (ex.getCause() != null) { + builder.field("caused_by"); + builder.startObject(); + toXContent(builder, params, ex.getCause()); + builder.endObject(); + } + } + } + + /** + * Returns the root cause of this exception or multiple if different shards caused different exceptions + */ + public ElasticsearchException[] guessRootCauses() { + final Throwable cause = getCause(); + if (cause != null && cause instanceof ElasticsearchException) { + return ((ElasticsearchException) cause).guessRootCauses(); + } + return new ElasticsearchException[] {this}; + } + + /** + * Returns the root cause of this exception or multiple if different shards caused different exceptions. + * If the given exception is not an instance of {@link org.elasticsearch.ElasticsearchException} a new exception wrapping it + is returned.
+ */ + public static ElasticsearchException[] guessRootCauses(Throwable t) { + Throwable ex = ExceptionsHelper.unwrapCause(t); + if (ex instanceof ElasticsearchException) { + return ((ElasticsearchException) ex).guessRootCauses(); + } + return new ElasticsearchException[] {new ElasticsearchException(t.getMessage(), t) { + @Override + protected String getExceptionName() { + return getExceptionName(getCause()); + } + }}; + } + + protected String getExceptionName() { + return getExceptionName(this); + } + + /** + * Returns an underscore case name for the given exception. This method strips Elasticsearch prefixes from exception names. + */ + public static String getExceptionName(Throwable ex) { + String simpleName = ex.getClass().getSimpleName(); + if (simpleName.startsWith("Elasticsearch")) { + simpleName = simpleName.substring("Elasticsearch".length()); + } + return Strings.toUnderscoreCase(simpleName); + } + + @Override + public String toString() { + return ExceptionsHelper.detailedMessage(this).trim(); + } + + } diff --git a/src/main/java/org/elasticsearch/ExceptionsHelper.java b/src/main/java/org/elasticsearch/ExceptionsHelper.java index 552d339301e..9c29a4dc0aa 100644 --- a/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -54,8 +54,12 @@ public final class ExceptionsHelper { } public static RestStatus status(Throwable t) { - if (t instanceof ElasticsearchException) { - return ((ElasticsearchException) t).status(); + if (t != null) { + if (t instanceof ElasticsearchException) { + return ((ElasticsearchException) t).status(); + } else if (t instanceof IllegalArgumentException) { + return RestStatus.BAD_REQUEST; + } } return RestStatus.INTERNAL_SERVER_ERROR; } diff --git a/src/main/java/org/elasticsearch/Version.java b/src/main/java/org/elasticsearch/Version.java index d34f1fb2f97..7f420e6bb5d 100644 --- a/src/main/java/org/elasticsearch/Version.java +++ b/src/main/java/org/elasticsearch/Version.java @@ -227,13 +227,17 @@ public class Version { public static final int V_1_4_4_ID = 1040499; public static final Version V_1_4_4 = new Version(V_1_4_4_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_3); public static final int V_1_4_5_ID = 1040599; - public static final Version V_1_4_5 = new Version(V_1_4_5_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4); + public static final Version V_1_4_5 = new Version(V_1_4_5_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4); + public static final int V_1_4_6_ID = 1040699; + public static final Version V_1_4_6 = new Version(V_1_4_6_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4); public static final int V_1_5_0_ID = 1050099; public static final Version V_1_5_0 = new Version(V_1_5_0_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4); public static final int V_1_5_1_ID = 1050199; public static final Version V_1_5_1 = new Version(V_1_5_1_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4); public static final int V_1_5_2_ID = 1050299; - public static final Version V_1_5_2 = new Version(V_1_5_2_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4); + public static final Version V_1_5_2 = new Version(V_1_5_2_ID, false, org.apache.lucene.util.Version.LUCENE_4_10_4); + public static final int V_1_5_3_ID = 1050399; + public static final Version V_1_5_3 = new Version(V_1_5_3_ID, true, org.apache.lucene.util.Version.LUCENE_4_10_4); public static final int V_1_6_0_ID = 1060099; public static final Version V_1_6_0 = new Version(V_1_6_0_ID, true,
org.apache.lucene.util.Version.LUCENE_4_10_4); public static final int V_2_0_0_ID = 2000099; @@ -255,12 +259,16 @@ public class Version { return V_2_0_0; case V_1_6_0_ID: return V_1_6_0; + case V_1_5_3_ID: + return V_1_5_3; case V_1_5_2_ID: return V_1_5_2; case V_1_5_1_ID: return V_1_5_1; case V_1_5_0_ID: return V_1_5_0; + case V_1_4_6_ID: + return V_1_4_6; case V_1_4_5_ID: return V_1_4_5; case V_1_4_4_ID: @@ -451,12 +459,12 @@ public class Version { /** * Return the {@link Version} of Elasticsearch that has been used to create an index given its settings. * - * @throws ElasticsearchIllegalStateException if the given index settings doesn't contain a value for the key {@value IndexMetaData#SETTING_VERSION_CREATED} + * @throws IllegalStateException if the given index settings doesn't contain a value for the key {@value IndexMetaData#SETTING_VERSION_CREATED} */ public static Version indexCreated(Settings indexSettings) { final Version indexVersion = indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null); if (indexVersion == null) { - throw new ElasticsearchIllegalStateException("[" + IndexMetaData.SETTING_VERSION_CREATED + "] is not present in the index settings for index with uuid: [" + indexSettings.get(IndexMetaData.SETTING_UUID) + "]"); + throw new IllegalStateException("[" + IndexMetaData.SETTING_VERSION_CREATED + "] is not present in the index settings for index with uuid: [" + indexSettings.get(IndexMetaData.SETTING_UUID) + "]"); } return indexVersion; } diff --git a/src/main/java/org/elasticsearch/action/ActionFuture.java b/src/main/java/org/elasticsearch/action/ActionFuture.java index bca3730b61b..f2b1d87ee5e 100644 --- a/src/main/java/org/elasticsearch/action/ActionFuture.java +++ b/src/main/java/org/elasticsearch/action/ActionFuture.java @@ -35,29 +35,29 @@ public interface ActionFuture extends Future { /** * Similar to {@link #get()}, just catching the {@link InterruptedException} and throwing - * an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches + * an {@link IllegalStateException} instead. Also catches * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead. *

<p/> * <p/>

Note, the actual cause is unwrapped to the actual failure (for example, unwrapped * from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is * still accessible using {@link #getRootFailure()}. */ - T actionGet() throws ElasticsearchException; + T actionGet(); /** * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing - * an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches + * an {@link IllegalStateException} instead. Also catches * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead. *

<p/> * <p/>

Note, the actual cause is unwrapped to the actual failure (for example, unwrapped * from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is * still accessible using {@link #getRootFailure()}. */ - T actionGet(String timeout) throws ElasticsearchException; + T actionGet(String timeout); /** * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing - * an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches + * an {@link IllegalStateException} instead. Also catches * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead. *

<p/> * <p/>

Note, the actual cause is unwrapped to the actual failure (for example, unwrapped @@ -66,29 +66,29 @@ public interface ActionFuture extends Future { * * @param timeoutMillis Timeout in millis */ - T actionGet(long timeoutMillis) throws ElasticsearchException; + T actionGet(long timeoutMillis); /** * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing - * an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches + * an {@link IllegalStateException} instead. Also catches * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead. *

<p/> * <p/>

Note, the actual cause is unwrapped to the actual failure (for example, unwrapped * from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is * still accessible using {@link #getRootFailure()}. */ - T actionGet(long timeout, TimeUnit unit) throws ElasticsearchException; + T actionGet(long timeout, TimeUnit unit); /** * Similar to {@link #get(long, java.util.concurrent.TimeUnit)}, just catching the {@link InterruptedException} and throwing - * an {@link org.elasticsearch.ElasticsearchIllegalStateException} instead. Also catches + * an {@link IllegalStateException} instead. Also catches * {@link java.util.concurrent.ExecutionException} and throws the actual cause instead. *

<p/> * <p/>

Note, the actual cause is unwrapped to the actual failure (for example, unwrapped * from {@link org.elasticsearch.transport.RemoteTransportException}. The root failure is * still accessible using {@link #getRootFailure()}. */ - T actionGet(TimeValue timeout) throws ElasticsearchException; + T actionGet(TimeValue timeout); /** * The root (possibly) wrapped failure. diff --git a/src/main/java/org/elasticsearch/action/ActionModule.java b/src/main/java/org/elasticsearch/action/ActionModule.java index 21df6223a28..3046d15418f 100644 --- a/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/src/main/java/org/elasticsearch/action/ActionModule.java @@ -28,8 +28,6 @@ import org.elasticsearch.action.admin.cluster.node.hotthreads.TransportNodesHotT import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction; import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownAction; -import org.elasticsearch.action.admin.cluster.node.shutdown.TransportNodesShutdownAction; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction; import org.elasticsearch.action.admin.cluster.node.stats.TransportNodesStatsAction; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryAction; @@ -126,14 +124,12 @@ import org.elasticsearch.action.count.CountAction; import org.elasticsearch.action.count.TransportCountAction; import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.delete.TransportDeleteAction; -import org.elasticsearch.action.deletebyquery.DeleteByQueryAction; -import org.elasticsearch.action.deletebyquery.TransportDeleteByQueryAction; -import org.elasticsearch.action.deletebyquery.TransportIndexDeleteByQueryAction; -import org.elasticsearch.action.deletebyquery.TransportShardDeleteByQueryAction; import org.elasticsearch.action.exists.ExistsAction; import org.elasticsearch.action.exists.TransportExistsAction; import org.elasticsearch.action.explain.ExplainAction; import org.elasticsearch.action.explain.TransportExplainAction; +import org.elasticsearch.action.fieldstats.FieldStatsAction; +import org.elasticsearch.action.fieldstats.TransportFieldStatsTransportAction; import org.elasticsearch.action.get.*; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.TransportIndexAction; @@ -220,7 +216,6 @@ public class ActionModule extends AbstractModule { bind(ActionFilters.class).asEagerSingleton(); registerAction(NodesInfoAction.INSTANCE, TransportNodesInfoAction.class); registerAction(NodesStatsAction.INSTANCE, TransportNodesStatsAction.class); - registerAction(NodesShutdownAction.INSTANCE, TransportNodesShutdownAction.class); registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class); registerAction(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class); @@ -285,8 +280,6 @@ public class ActionModule extends AbstractModule { TransportShardMultiGetAction.class); registerAction(BulkAction.INSTANCE, TransportBulkAction.class, TransportShardBulkAction.class); - registerAction(DeleteByQueryAction.INSTANCE, TransportDeleteByQueryAction.class, - TransportIndexDeleteByQueryAction.class, TransportShardDeleteByQueryAction.class); registerAction(SearchAction.INSTANCE, TransportSearchAction.class, TransportSearchDfsQueryThenFetchAction.class, TransportSearchQueryThenFetchAction.class, @@ 
-312,6 +305,8 @@ public class ActionModule extends AbstractModule { registerAction(GetIndexedScriptAction.INSTANCE, TransportGetIndexedScriptAction.class); registerAction(DeleteIndexedScriptAction.INSTANCE, TransportDeleteIndexedScriptAction.class); + registerAction(FieldStatsAction.INSTANCE, TransportFieldStatsTransportAction.class); + // register Name -> GenericAction Map that can be injected to instances. MapBinder actionsBinder = MapBinder.newMapBinder(binder(), String.class, GenericAction.class); diff --git a/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java b/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java index d52b2cf5923..4335a40e030 100644 --- a/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/ActionRequestBuilder.java @@ -69,21 +69,21 @@ public abstract class ActionRequestBuilder validationErrors = new ArrayList<>(); public ActionRequestValidationException() { - super(null); + super("validation failed"); } public void addValidationError(String error) { diff --git a/src/main/java/org/elasticsearch/action/ActionWriteResponse.java b/src/main/java/org/elasticsearch/action/ActionWriteResponse.java index 63b1f06a9f9..11240cc2cbc 100644 --- a/src/main/java/org/elasticsearch/action/ActionWriteResponse.java +++ b/src/main/java/org/elasticsearch/action/ActionWriteResponse.java @@ -19,16 +19,22 @@ package org.elasticsearch.action; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.bootstrap.Elasticsearch; import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.rest.RestStatus; import java.io.IOException; +import java.util.Collections; /** * Base class for write action responses. 
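The `ActionFuture` hunk above removes the checked `throws ElasticsearchException` from every `actionGet` variant, leaving only unchecked exceptions for blocking callers. A minimal sketch of the blocking contract those javadocs describe, written against plain `java.util.concurrent` types; the `ActionGetSketch` class and its helper below are illustrative stand-ins, not ES source.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

final class ActionGetSketch {

    // Mirrors the actionGet() contract described above: block on the future,
    // turn InterruptedException into an unchecked IllegalStateException, and
    // unwrap ExecutionException so the actual failure surfaces directly.
    static <T> T actionGet(Future<T> future) {
        try {
            return future.get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IllegalStateException("Future got interrupted", e);
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (cause instanceof RuntimeException) {
                throw (RuntimeException) cause;
            }
            throw new RuntimeException("failed execution", cause);
        }
    }

    public static void main(String[] args) {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        Future<String> response = pool.submit(() -> "ok");
        // No checked exception to declare or catch at the call site any more.
        System.out.println(actionGet(response));
        pool.shutdown();
    }
}

The real implementation additionally unwraps `RemoteTransportException`, so the root failure stays reachable through `getRootFailure()`.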
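The `toXContent` support added to `ElasticsearchException` above pairs with the `ShardInfo.Failure` change in the next hunk, which carries a structured `Throwable` cause instead of a flat `reason` string: failures are rendered as nested `type`/`reason`/`caused_by` objects, with exception class names underscore-cased and stripped of their `Elasticsearch` prefix. A rough sketch of that recursion, emitting a plain JSON string instead of driving an `XContentBuilder` and using a simplified underscore-casing; class and method names below are hypothetical, not ES source.

final class ErrorRenderSketch {

    // Simplified analogue of getExceptionName(Throwable) above: strip the
    // "Elasticsearch" prefix and underscore-case the simple class name.
    static String exceptionName(Throwable t) {
        String name = t.getClass().getSimpleName();
        if (name.startsWith("Elasticsearch")) {
            name = name.substring("Elasticsearch".length());
        }
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < name.length(); i++) {
            char c = name.charAt(i);
            if (Character.isUpperCase(c) && i > 0) {
                sb.append('_');
            }
            sb.append(Character.toLowerCase(c));
        }
        return sb.toString();
    }

    // Recurses through the cause chain the way causeToXContent does above.
    // No escaping or null handling: this is only a sketch.
    static String render(Throwable t) {
        StringBuilder json = new StringBuilder();
        json.append("{\"type\":\"").append(exceptionName(t))
            .append("\",\"reason\":\"").append(t.getMessage()).append('"');
        if (t.getCause() != null) {
            json.append(",\"caused_by\":").append(render(t.getCause()));
        }
        return json.append('}').toString();
    }

    public static void main(String[] args) {
        Throwable failure = new IllegalStateException("shard not available",
                new java.io.IOException("disk read failed"));
        // Prints: {"type":"illegal_state_exception","reason":"shard not available",
        //          "caused_by":{"type":"i_o_exception","reason":"disk read failed"}}
        System.out.println(render(failure));
    }
}

This is the same shape the updated REST tests above now match against, e.g. `catch: /routing_missing_exception/` in place of the old `RoutingMissingException` class name.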
@@ -153,6 +159,11 @@ public abstract class ActionWriteResponse extends ActionResponse { return builder; } + @Override + public String toString() { + return Strings.toString(this); + } + public static ShardInfo readShardInfo(StreamInput in) throws IOException { ShardInfo shardInfo = new ShardInfo(); shardInfo.readFrom(in); @@ -164,15 +175,15 @@ public abstract class ActionWriteResponse extends ActionResponse { private String index; private int shardId; private String nodeId; - private String reason; + private Throwable cause; private RestStatus status; private boolean primary; - public Failure(String index, int shardId, @Nullable String nodeId, String reason, RestStatus status, boolean primary) { + public Failure(String index, int shardId, @Nullable String nodeId, Throwable cause, RestStatus status, boolean primary) { this.index = index; this.shardId = shardId; this.nodeId = nodeId; - this.reason = reason; + this.cause = cause; this.status = status; this.primary = primary; } @@ -209,7 +220,7 @@ public abstract class ActionWriteResponse extends ActionResponse { */ @Override public String reason() { - return reason; + return ExceptionsHelper.detailedMessage(cause); } /** @@ -233,7 +244,7 @@ public abstract class ActionWriteResponse extends ActionResponse { index = in.readString(); shardId = in.readVInt(); nodeId = in.readOptionalString(); - reason = in.readString(); + cause = in.readThrowable(); status = RestStatus.readFrom(in); primary = in.readBoolean(); } @@ -243,7 +254,7 @@ public abstract class ActionWriteResponse extends ActionResponse { out.writeString(index); out.writeVInt(shardId); out.writeOptionalString(nodeId); - out.writeString(reason); + out.writeThrowable(cause); RestStatus.writeTo(out, status); out.writeBoolean(primary); } @@ -254,7 +265,10 @@ public abstract class ActionWriteResponse extends ActionResponse { builder.field(Fields._INDEX, index); builder.field(Fields._SHARD, shardId); builder.field(Fields._NODE, nodeId); - builder.field(Fields.REASON, reason); + builder.field(Fields.REASON); + builder.startObject(); + ElasticsearchException.toXContent(builder, params, cause); + builder.endObject(); builder.field(Fields.STATUS, status); builder.field(Fields.PRIMARY, primary); builder.endObject(); diff --git a/src/main/java/org/elasticsearch/action/ThreadingModel.java b/src/main/java/org/elasticsearch/action/ThreadingModel.java index 5f87d82c528..a4e020d9f08 100644 --- a/src/main/java/org/elasticsearch/action/ThreadingModel.java +++ b/src/main/java/org/elasticsearch/action/ThreadingModel.java @@ -19,7 +19,6 @@ package org.elasticsearch.action; -import org.elasticsearch.ElasticsearchIllegalArgumentException; /** * @@ -108,7 +107,7 @@ public enum ThreadingModel { } else if (id == 3) { return OPERATION_LISTENER; } else { - throw new ElasticsearchIllegalArgumentException("No threading model for [" + id + "]"); + throw new IllegalArgumentException("No threading model for [" + id + "]"); } } } diff --git a/src/main/java/org/elasticsearch/action/WriteConsistencyLevel.java b/src/main/java/org/elasticsearch/action/WriteConsistencyLevel.java index eeabdb99cb3..0813e85960f 100644 --- a/src/main/java/org/elasticsearch/action/WriteConsistencyLevel.java +++ b/src/main/java/org/elasticsearch/action/WriteConsistencyLevel.java @@ -19,7 +19,6 @@ package org.elasticsearch.action; -import org.elasticsearch.ElasticsearchIllegalArgumentException; /** * Write Consistency Level control how many replicas should be active for a write operation to occur (a write operation @@ -53,7 +52,7 @@ public enum 
WriteConsistencyLevel { } else if (value == 3) { return ALL; } - throw new ElasticsearchIllegalArgumentException("No write consistency match [" + value + "]"); + throw new IllegalArgumentException("No write consistency match [" + value + "]"); } public static WriteConsistencyLevel fromString(String value) { @@ -66,6 +65,6 @@ public enum WriteConsistencyLevel { } else if (value.equals("all")) { return ALL; } - throw new ElasticsearchIllegalArgumentException("No write consistency match [" + value + "]"); + throw new IllegalArgumentException("No write consistency match [" + value + "]"); } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthStatus.java b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthStatus.java index 50479ee2df0..7936bc1d8fd 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthStatus.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/health/ClusterHealthStatus.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.health; -import org.elasticsearch.ElasticsearchIllegalArgumentException; /** * @@ -48,7 +47,7 @@ public enum ClusterHealthStatus { case 2: return RED; default: - throw new ElasticsearchIllegalArgumentException("No cluster health status for value [" + value + "]"); + throw new IllegalArgumentException("No cluster health status for value [" + value + "]"); } } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index 04bb0afae06..0564c8d2897 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.cluster.health; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.IndicesOptions; @@ -45,7 +44,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadOperati @Inject public TransportClusterHealthAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ClusterName clusterName, ActionFilters actionFilters) { - super(settings, ClusterHealthAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, ClusterHealthAction.NAME, transportService, clusterService, threadPool, actionFilters, ClusterHealthRequest.class); this.clusterName = clusterName; } @@ -60,18 +59,13 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadOperati return null; // we want users to be able to call this even when there are global blocks, just to check the health (are there blocks?) 
} - @Override - protected ClusterHealthRequest newRequest() { - return new ClusterHealthRequest(); - } - @Override protected ClusterHealthResponse newResponse() { return new ClusterHealthResponse(); } @Override - protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final ClusterHealthRequest request, final ClusterState unusedState, final ActionListener listener) { if (request.waitForEvents() != null) { final long endTime = System.currentTimeMillis() + request.timeout().millis(); clusterService.submitStateUpdateTask("cluster_health (wait_for_events [" + request.waitForEvents() + "])", request.waitForEvents(), new ProcessedClusterStateUpdateTask() { @@ -146,7 +140,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadOperati @Override public void onClusterServiceClose() { - listener.onFailure(new ElasticsearchIllegalStateException("ClusterService was close during health call")); + listener.onFailure(new IllegalStateException("ClusterService was close during health call")); } @Override diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java index 8db432b11e3..fe092d7dc81 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java @@ -38,6 +38,11 @@ public class NodesHotThreadsRequest extends NodesOperationRequest nodesInfos = new ArrayList<>(); @@ -70,16 +66,6 @@ public class TransportNodesInfoAction extends TransportNodesOperationAction { +public final class TransportLivenessAction implements TransportRequestHandler { private final ClusterService clusterService; private final ClusterName clusterName; @@ -37,21 +37,11 @@ public final class TransportLivenessAction extends BaseTransportRequestHandler { - - String[] nodesIds = Strings.EMPTY_ARRAY; - - TimeValue delay = TimeValue.timeValueSeconds(1); - - boolean exit = true; - - NodesShutdownRequest() { - } - - public NodesShutdownRequest(String... nodesIds) { - this.nodesIds = nodesIds; - } - - public NodesShutdownRequest nodesIds(String... nodesIds) { - this.nodesIds = nodesIds; - return this; - } - - /** - * The delay for the shutdown to occur. Defaults to 1s. - */ - public NodesShutdownRequest delay(TimeValue delay) { - this.delay = delay; - return this; - } - - public TimeValue delay() { - return this.delay; - } - - /** - * The delay for the shutdown to occur. Defaults to 1s. - */ - public NodesShutdownRequest delay(String delay) { - return delay(TimeValue.parseTimeValue(delay, null)); - } - - /** - * Should the JVM be exited as well or not. Defaults to true. - */ - public NodesShutdownRequest exit(boolean exit) { - this.exit = exit; - return this; - } - - /** - * Should the JVM be exited as well or not. Defaults to true. 
- */ - public boolean exit() { - return exit; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - delay = readTimeValue(in); - nodesIds = in.readStringArray(); - exit = in.readBoolean(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - delay.writeTo(out); - out.writeStringArrayNullable(nodesIds); - out.writeBoolean(exit); - } -} diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequestBuilder.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequestBuilder.java deleted file mode 100644 index fc0f767f02c..00000000000 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownRequestBuilder.java +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.cluster.node.shutdown; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; -import org.elasticsearch.client.ClusterAdminClient; -import org.elasticsearch.common.unit.TimeValue; - -/** - * - */ -public class NodesShutdownRequestBuilder extends MasterNodeOperationRequestBuilder { - - public NodesShutdownRequestBuilder(ClusterAdminClient clusterClient) { - super(clusterClient, new NodesShutdownRequest()); - } - - /** - * The nodes ids to restart. - */ - public NodesShutdownRequestBuilder setNodesIds(String... nodesIds) { - request.nodesIds(nodesIds); - return this; - } - - /** - * The delay for the restart to occur. Defaults to 1s. - */ - public NodesShutdownRequestBuilder setDelay(TimeValue delay) { - request.delay(delay); - return this; - } - - /** - * The delay for the restart to occur. Defaults to 1s. - */ - public NodesShutdownRequestBuilder setDelay(String delay) { - request.delay(delay); - return this; - } - - /** - * Should the JVM be exited as well or not. Defaults to true. 
- */ - public NodesShutdownRequestBuilder setExit(boolean exit) { - request.exit(exit); - return this; - } - - @Override - protected void doExecute(ActionListener listener) { - client.nodesShutdown(request, listener); - } -} diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportNodesShutdownAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportNodesShutdownAction.java deleted file mode 100644 index 34fc6891b23..00000000000 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/TransportNodesShutdownAction.java +++ /dev/null @@ -1,320 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.admin.cluster.node.shutdown; - -import com.carrotsearch.hppc.ObjectOpenHashSet; -import com.carrotsearch.hppc.cursors.ObjectCursor; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.node.Node; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.*; - -import java.io.IOException; -import java.util.concurrent.CountDownLatch; - -/** - * - */ -public class TransportNodesShutdownAction extends TransportMasterNodeOperationAction { - - public static final String SHUTDOWN_NODE_ACTION_NAME = NodesShutdownAction.NAME + "[n]"; - - private final Node node; - private final ClusterName clusterName; - private final boolean disabled; - private final TimeValue delay; - - @Inject - public TransportNodesShutdownAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - Node node, ClusterName clusterName, ActionFilters actionFilters) { - super(settings, NodesShutdownAction.NAME, transportService, clusterService, threadPool, actionFilters); - this.node = node; - this.clusterName = clusterName; - this.disabled = settings.getAsBoolean("action.disable_shutdown", this.settings.getAsBoolean("action.admin.cluster.node.shutdown.disabled", 
false)); - this.delay = this.settings.getAsTime("action.admin.cluster.node.shutdown.delay", TimeValue.timeValueMillis(200)); - - this.transportService.registerHandler(SHUTDOWN_NODE_ACTION_NAME, new NodeShutdownRequestHandler()); - } - - @Override - protected String executor() { - return ThreadPool.Names.GENERIC; - } - - @Override - protected ClusterBlockException checkBlock(NodesShutdownRequest request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA); - } - - @Override - protected NodesShutdownRequest newRequest() { - return new NodesShutdownRequest(); - } - - @Override - protected NodesShutdownResponse newResponse() { - return new NodesShutdownResponse(); - } - - @Override - protected void processBeforeDelegationToMaster(NodesShutdownRequest request, ClusterState state) { - String[] nodesIds = request.nodesIds; - if (nodesIds != null) { - for (int i = 0; i < nodesIds.length; i++) { - // replace the _local one, since it looses its meaning when going over to the master... - if ("_local".equals(nodesIds[i])) { - nodesIds[i] = state.nodes().localNodeId(); - } - } - } - } - - @Override - protected void masterOperation(final NodesShutdownRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { - if (disabled) { - throw new ElasticsearchIllegalStateException("Shutdown is disabled"); - } - final ObjectOpenHashSet nodes = new ObjectOpenHashSet<>(); - if (state.nodes().isAllNodes(request.nodesIds)) { - logger.info("[cluster_shutdown]: requested, shutting down in [{}]", request.delay); - nodes.addAll(state.nodes().dataNodes().values()); - nodes.addAll(state.nodes().masterNodes().values()); - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - Thread.sleep(request.delay.millis()); - } catch (InterruptedException e) { - // ignore - } - // first, stop the cluster service - logger.trace("[cluster_shutdown]: stopping the cluster service so no re-routing will occur"); - clusterService.stop(); - - final CountDownLatch latch = new CountDownLatch(nodes.size()); - for (ObjectCursor cursor : nodes) { - final DiscoveryNode node = cursor.value; - if (node.id().equals(state.nodes().masterNodeId())) { - // don't shutdown the master yet... 
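// The removed masterOperation above coordinates a whole-cluster shutdown: it
// fans a shutdown request out to every node, counts acknowledgements on a
// CountDownLatch, and deliberately keeps the elected master for last so the
// coordinator stays alive while the other nodes go down. A condensed,
// stand-alone sketch of that pattern, assuming only the JDK; Node and send()
// are illustrative stand-ins for the transport layer:

import java.util.List;
import java.util.concurrent.CountDownLatch;

class FanOutShutdown {

    interface Node {
        boolean isMaster();
        void send(Runnable onAck); // deliver a shutdown request, invoke onAck on response
    }

    static void shutdownAll(List<Node> nodes) throws InterruptedException {
        CountDownLatch latch = new CountDownLatch(nodes.size());
        Node master = null;
        for (Node node : nodes) {
            if (node.isMaster()) {
                master = node;     // don't shut down the master yet...
                latch.countDown(); // ...but still account for it in the latch
            } else {
                node.send(latch::countDown);
            }
        }
        latch.await();             // every non-master node has acknowledged
        if (master != null) {
            master.send(() -> {}); // now the master itself can go down
        }
    }
}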
- latch.countDown(); - } else { - logger.trace("[cluster_shutdown]: sending shutdown request to [{}]", node); - transportService.sendRequest(node, SHUTDOWN_NODE_ACTION_NAME, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - @Override - public void handleResponse(TransportResponse.Empty response) { - logger.trace("[cluster_shutdown]: received shutdown response from [{}]", node); - latch.countDown(); - } - - @Override - public void handleException(TransportException exp) { - logger.warn("[cluster_shutdown]: received failed shutdown response from [{}]", exp, node); - latch.countDown(); - } - }); - } - } - try { - latch.await(); - } catch (InterruptedException e) { - // ignore - } - logger.info("[cluster_shutdown]: done shutting down all nodes except master, proceeding to master"); - - // now, kill the master - logger.trace("[cluster_shutdown]: shutting down the master [{}]", state.nodes().masterNode()); - transportService.sendRequest(state.nodes().masterNode(), SHUTDOWN_NODE_ACTION_NAME, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - @Override - public void handleResponse(TransportResponse.Empty response) { - logger.trace("[cluster_shutdown]: received shutdown response from master"); - } - - @Override - public void handleException(TransportException exp) { - logger.warn("[cluster_shutdown]: received failed shutdown response master", exp); - } - }); - } - }); - t.start(); - } else { - final String[] nodesIds = state.nodes().resolveNodesIds(request.nodesIds); - logger.info("[partial_cluster_shutdown]: requested, shutting down [{}] in [{}]", nodesIds, request.delay); - - for (String nodeId : nodesIds) { - final DiscoveryNode node = state.nodes().get(nodeId); - if (node != null) { - nodes.add(node); - } - } - - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - Thread.sleep(request.delay.millis()); - } catch (InterruptedException e) { - // ignore - } - - final CountDownLatch latch = new CountDownLatch(nodesIds.length); - for (String nodeId : nodesIds) { - final DiscoveryNode node = state.nodes().get(nodeId); - if (node == null) { - logger.warn("[partial_cluster_shutdown]: no node to shutdown for node_id [{}]", nodeId); - latch.countDown(); - continue; - } - - logger.trace("[partial_cluster_shutdown]: sending shutdown request to [{}]", node); - transportService.sendRequest(node, SHUTDOWN_NODE_ACTION_NAME, new NodeShutdownRequest(request), new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - @Override - public void handleResponse(TransportResponse.Empty response) { - logger.trace("[partial_cluster_shutdown]: received shutdown response from [{}]", node); - latch.countDown(); - } - - @Override - public void handleException(TransportException exp) { - logger.warn("[partial_cluster_shutdown]: received failed shutdown response from [{}]", exp, node); - latch.countDown(); - } - }); - } - - try { - latch.await(); - } catch (InterruptedException e) { - // ignore - } - - logger.info("[partial_cluster_shutdown]: done shutting down [{}]", ((Object) nodesIds)); - } - }); - t.start(); - } - listener.onResponse(new NodesShutdownResponse(clusterName, nodes.toArray(DiscoveryNode.class))); - } - - private class NodeShutdownRequestHandler extends BaseTransportRequestHandler { - - @Override - public NodeShutdownRequest newInstance() { - return new NodeShutdownRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - - @Override - public void 
messageReceived(final NodeShutdownRequest request, TransportChannel channel) throws Exception { - if (disabled) { - throw new ElasticsearchIllegalStateException("Shutdown is disabled"); - } - logger.info("shutting down in [{}]", delay); - Thread t = new Thread(new Runnable() { - @Override - public void run() { - try { - Thread.sleep(delay.millis()); - } catch (InterruptedException e) { - // ignore - } - if (!request.exit) { - logger.info("initiating requested shutdown (no exit)..."); - try { - node.close(); - } catch (Exception e) { - logger.warn("Failed to shutdown", e); - } - return; - } - boolean shutdownWithWrapper = false; - if (System.getProperty("elasticsearch-service") != null) { - try { - Class wrapperManager = settings.getClassLoader().loadClass("org.tanukisoftware.wrapper.WrapperManager"); - logger.info("initiating requested shutdown (using service)"); - wrapperManager.getMethod("stopAndReturn", int.class).invoke(null, 0); - shutdownWithWrapper = true; - } catch (Throwable e) { - logger.error("failed to initial shutdown on service wrapper", e); - } - } - if (!shutdownWithWrapper) { - logger.info("initiating requested shutdown..."); - try { - node.close(); - } catch (Exception e) { - logger.warn("Failed to shutdown", e); - } finally { - // make sure we initiate the shutdown hooks, so the Bootstrap#main thread will exit - System.exit(0); - } - } - } - }); - t.start(); - - channel.sendResponse(TransportResponse.Empty.INSTANCE); - } - } - - static class NodeShutdownRequest extends TransportRequest { - - boolean exit; - - NodeShutdownRequest() { - } - - NodeShutdownRequest(NodesShutdownRequest request) { - super(request); - this.exit = request.exit(); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - exit = in.readBoolean(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBoolean(exit); - } - } -} diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 339b8c4717d..31adc2d6e67 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -49,15 +49,11 @@ public class TransportNodesStatsAction extends TransportNodesOperationAction nodeStats = Lists.newArrayList(); @@ -70,16 +66,6 @@ public class TransportNodesStatsAction extends TransportNodesOperationAction listener) throws ElasticsearchException { + protected void masterOperation(final DeleteRepositoryRequest request, ClusterState state, final ActionListener listener) { repositoriesService.unregisterRepository( new RepositoriesService.UnregisterRepositoryRequest("delete_repository [" + request.name() + "]", request.name()) .masterNodeTimeout(request.masterNodeTimeout()).ackTimeout(request.timeout()), diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java index a48d5293b46..fda863eb271 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/get/TransportGetRepositoriesAction.java @@ -45,7 +45,7 @@ public class 
TransportGetRepositoriesAction extends TransportMasterNodeReadOpera @Inject public TransportGetRepositoriesAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, GetRepositoriesAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, GetRepositoriesAction.NAME, transportService, clusterService, threadPool, actionFilters, GetRepositoriesRequest.class); } @Override @@ -53,11 +53,6 @@ public class TransportGetRepositoriesAction extends TransportMasterNodeReadOpera return ThreadPool.Names.MANAGEMENT; } - @Override - protected GetRepositoriesRequest newRequest() { - return new GetRepositoriesRequest(); - } - @Override protected GetRepositoriesResponse newResponse() { return new GetRepositoriesResponse(); @@ -65,11 +60,11 @@ public class TransportGetRepositoriesAction extends TransportMasterNodeReadOpera @Override protected ClusterBlockException checkBlock(GetRepositoriesRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, ""); + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_READ, ""); } @Override - protected void masterOperation(final GetRepositoriesRequest request, ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final GetRepositoriesRequest request, ClusterState state, final ActionListener listener) { MetaData metaData = state.metaData(); RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE); if (request.repositories().length == 0 || (request.repositories().length == 1 && "_all".equals(request.repositories()[0]))) { diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index 8682ee1611f..e0fdc30b3ad 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -20,8 +20,7 @@ package org.elasticsearch.action.admin.cluster.repositories.put; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.Version; + import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.common.bytes.BytesReference; @@ -218,7 +217,7 @@ public class PutRepositoryRequest extends AcknowledgedRequest) entry.getValue()); } @@ -236,7 +235,7 @@ public class PutRepositoryRequest extends AcknowledgedRequest listener) throws ElasticsearchException { + protected void masterOperation(final PutRepositoryRequest request, ClusterState state, final ActionListener listener) { repositoriesService.registerRepository( new RepositoriesService.RegisterRepositoryRequest("put_repository [" + request.name() + "]", diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java index 308db280367..6ce60afc74c 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java +++ 
b/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/TransportVerifyRepositoryAction.java @@ -47,7 +47,7 @@ public class TransportVerifyRepositoryAction extends TransportMasterNodeOperatio @Inject public TransportVerifyRepositoryAction(Settings settings, ClusterName clusterName, TransportService transportService, ClusterService clusterService, RepositoriesService repositoriesService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, VerifyRepositoryAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, VerifyRepositoryAction.NAME, transportService, clusterService, threadPool, actionFilters, VerifyRepositoryRequest.class); this.repositoriesService = repositoriesService; this.clusterName = clusterName; } @@ -57,11 +57,6 @@ public class TransportVerifyRepositoryAction extends TransportMasterNodeOperatio return ThreadPool.Names.MANAGEMENT; } - @Override - protected VerifyRepositoryRequest newRequest() { - return new VerifyRepositoryRequest(); - } - @Override protected VerifyRepositoryResponse newResponse() { return new VerifyRepositoryResponse(); @@ -69,11 +64,11 @@ public class TransportVerifyRepositoryAction extends TransportMasterNodeOperatio @Override protected ClusterBlockException checkBlock(VerifyRepositoryRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, ""); + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_READ, ""); } @Override - protected void masterOperation(final VerifyRepositoryRequest request, ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final VerifyRepositoryRequest request, ClusterState state, final ActionListener listener) { repositoriesService.verifyRepository(request.name(), new ActionListener() { @Override public void onResponse(RepositoriesService.VerifyResponse verifyResponse) { diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java index 79b31f620d5..28f9cb1db90 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java @@ -68,7 +68,7 @@ public class ClusterRerouteResponse extends AcknowledgedResponse { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - ClusterState.Builder.writeTo(state, out); + state.writeTo(out); writeAcknowledged(out); RoutingExplanations.writeTo(explanations, out); } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java index 5606983d334..05c46177d86 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/reroute/TransportClusterRerouteAction.java @@ -46,7 +46,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeOperationA @Inject public TransportClusterRerouteAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, AllocationService allocationService, ActionFilters actionFilters) { - super(settings, ClusterRerouteAction.NAME, transportService, clusterService, 
threadPool, actionFilters); + super(settings, ClusterRerouteAction.NAME, transportService, clusterService, threadPool, actionFilters, ClusterRerouteRequest.class); this.allocationService = allocationService; } @@ -58,12 +58,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeOperationA @Override protected ClusterBlockException checkBlock(ClusterRerouteRequest request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA); - } - - @Override - protected ClusterRerouteRequest newRequest() { - return new ClusterRerouteRequest(); + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } @Override @@ -72,7 +67,7 @@ public class TransportClusterRerouteAction extends TransportMasterNodeOperationA } @Override - protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final ClusterRerouteRequest request, final ClusterState state, final ActionListener listener) { clusterService.submitStateUpdateTask("cluster_reroute (api)", Priority.IMMEDIATE, new AckedClusterStateUpdateTask(request, listener) { private volatile ClusterState clusterStateToSend; diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java index 40a491de253..526b6afa389 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/settings/TransportClusterUpdateSettingsAction.java @@ -59,7 +59,7 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeOpe @Inject public TransportClusterUpdateSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, AllocationService allocationService, @ClusterDynamicSettings DynamicSettings dynamicSettings, ActionFilters actionFilters) { - super(settings, ClusterUpdateSettingsAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, ClusterUpdateSettingsAction.NAME, transportService, clusterService, threadPool, actionFilters, ClusterUpdateSettingsRequest.class); this.allocationService = allocationService; this.dynamicSettings = dynamicSettings; } @@ -76,22 +76,17 @@ public class TransportClusterUpdateSettingsAction extends TransportMasterNodeOpe request.persistentSettings().getAsMap().isEmpty() && request.transientSettings().getAsMap().size() == 1 && request.transientSettings().get(MetaData.SETTING_READ_ONLY) != null) { return null; } - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA); + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } - @Override - protected ClusterUpdateSettingsRequest newRequest() { - return new ClusterUpdateSettingsRequest(); - } - @Override protected ClusterUpdateSettingsResponse newResponse() { return new ClusterUpdateSettingsResponse(); } @Override - protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final ClusterUpdateSettingsRequest request, final ClusterState state, final ActionListener listener) { final ImmutableSettings.Builder transientUpdates = 
ImmutableSettings.settingsBuilder(); final ImmutableSettings.Builder persistentUpdates = ImmutableSettings.settingsBuilder(); diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java index b00878d3673..de8e1fcdfab 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.shards; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -61,11 +60,11 @@ public class ClusterSearchShardsRequest extends MasterNodeReadOperationRequest listener) throws ElasticsearchException { + protected void masterOperation(final ClusterSearchShardsRequest request, final ClusterState state, final ActionListener listener) { ClusterState clusterState = clusterService.state(); String[] concreteIndices = clusterState.metaData().concreteIndices(request.indicesOptions(), request.indices()); Map> routingMap = clusterState.metaData().resolveSearchRouting(request.routing(), request.indices()); diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index f999740624d..4cafbb2e52d 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.create; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -371,10 +370,6 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest entry : ((Map) source).entrySet()) { String name = entry.getKey(); if (name.equals("indices")) { @@ -383,28 +378,20 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest) entry.getValue()); } else { - throw new ElasticsearchIllegalArgumentException("malformed indices section, should be an array of strings"); + throw new IllegalArgumentException("malformed indices section, should be an array of strings"); } - } else if (name.equals("ignore_unavailable") || name.equals("ignoreUnavailable")) { - ignoreUnavailable = nodeBooleanValue(entry.getValue()); - } else if (name.equals("allow_no_indices") || name.equals("allowNoIndices")) { - allowNoIndices = nodeBooleanValue(entry.getValue()); - } else if (name.equals("expand_wildcards_open") || name.equals("expandWildcardsOpen")) { - expandWildcardsOpen = nodeBooleanValue(entry.getValue()); - } else if (name.equals("expand_wildcards_closed") || name.equals("expandWildcardsClosed")) { - expandWildcardsClosed = nodeBooleanValue(entry.getValue()); } else if (name.equals("partial")) { partial(nodeBooleanValue(entry.getValue())); } else if (name.equals("settings")) { if (!(entry.getValue() instanceof Map)) { - throw new 
ElasticsearchIllegalArgumentException("malformed settings section, should indices an inner object"); + throw new IllegalArgumentException("malformed settings section, should indices an inner object"); } settings((Map) entry.getValue()); } else if (name.equals("include_global_state")) { includeGlobalState = nodeBooleanValue(entry.getValue()); } } - indicesOptions(IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed)); + indicesOptions(IndicesOptions.fromMap((Map) source, IndicesOptions.lenientExpandOpen())); return this; } @@ -419,7 +406,7 @@ public class CreateSnapshotRequest extends MasterNodeOperationRequest listener) throws ElasticsearchException { + protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, final ActionListener listener) { SnapshotsService.SnapshotRequest snapshotRequest = new SnapshotsService.SnapshotRequest("create_snapshot[" + request.snapshot() + "]", request.snapshot(), request.repository()) .indices(request.indices()) diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java index 0e5ec8ed3fb..2b59a09db2b 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/delete/TransportDeleteSnapshotAction.java @@ -43,7 +43,7 @@ public class TransportDeleteSnapshotAction extends TransportMasterNodeOperationA @Inject public TransportDeleteSnapshotAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, SnapshotsService snapshotsService, ActionFilters actionFilters) { - super(settings, DeleteSnapshotAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, DeleteSnapshotAction.NAME, transportService, clusterService, threadPool, actionFilters, DeleteSnapshotRequest.class); this.snapshotsService = snapshotsService; } @@ -52,11 +52,6 @@ public class TransportDeleteSnapshotAction extends TransportMasterNodeOperationA return ThreadPool.Names.GENERIC; } - @Override - protected DeleteSnapshotRequest newRequest() { - return new DeleteSnapshotRequest(); - } - @Override protected DeleteSnapshotResponse newResponse() { return new DeleteSnapshotResponse(); @@ -64,11 +59,11 @@ public class TransportDeleteSnapshotAction extends TransportMasterNodeOperationA @Override protected ClusterBlockException checkBlock(DeleteSnapshotRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, ""); + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, ""); } @Override - protected void masterOperation(final DeleteSnapshotRequest request, ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final DeleteSnapshotRequest request, ClusterState state, final ActionListener listener) { SnapshotId snapshotIds = new SnapshotId(request.repository(), request.snapshot()); snapshotsService.deleteSnapshot(snapshotIds, new SnapshotsService.DeleteSnapshotListener() { @Override diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java index 
4420777bd52..dd36800a277 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java @@ -46,7 +46,7 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeOperationAct @Inject public TransportGetSnapshotsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, SnapshotsService snapshotsService, ActionFilters actionFilters) { - super(settings, GetSnapshotsAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, GetSnapshotsAction.NAME, transportService, clusterService, threadPool, actionFilters, GetSnapshotsRequest.class); this.snapshotsService = snapshotsService; } @@ -55,11 +55,6 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeOperationAct return ThreadPool.Names.GENERIC; } - @Override - protected GetSnapshotsRequest newRequest() { - return new GetSnapshotsRequest(); - } - @Override protected GetSnapshotsResponse newResponse() { return new GetSnapshotsResponse(); @@ -67,11 +62,11 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeOperationAct @Override protected ClusterBlockException checkBlock(GetSnapshotsRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, ""); + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_READ, ""); } @Override - protected void masterOperation(final GetSnapshotsRequest request, ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final GetSnapshotsRequest request, ClusterState state, final ActionListener listener) { try { ImmutableList.Builder snapshotInfoBuilder = ImmutableList.builder(); if (isAllSnapshots(request.snapshots())) { diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 5e8c3fe5b62..4be7b39da9a 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.restore; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.IndicesOptions; @@ -493,7 +492,7 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest entry : ((Map) source).entrySet()) { String name = entry.getKey(); if (name.equals("indices")) { @@ -517,21 +511,13 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest) entry.getValue()); } else { - throw new ElasticsearchIllegalArgumentException("malformed indices section, should be an array of strings"); + throw new IllegalArgumentException("malformed indices section, should be an array of strings"); } - } else if (name.equals("ignore_unavailable") || name.equals("ignoreUnavailable")) { - ignoreUnavailable = nodeBooleanValue(entry.getValue()); - } else if (name.equals("allow_no_indices") || name.equals("allowNoIndices")) { - allowNoIndices = 
nodeBooleanValue(entry.getValue()); - } else if (name.equals("expand_wildcards_open") || name.equals("expandWildcardsOpen")) { - expandWildcardsOpen = nodeBooleanValue(entry.getValue()); - } else if (name.equals("expand_wildcards_closed") || name.equals("expandWildcardsClosed")) { - expandWildcardsClosed = nodeBooleanValue(entry.getValue()); } else if (name.equals("partial")) { partial(nodeBooleanValue(entry.getValue())); } else if (name.equals("settings")) { if (!(entry.getValue() instanceof Map)) { - throw new ElasticsearchIllegalArgumentException("malformed settings section"); + throw new IllegalArgumentException("malformed settings section"); } settings((Map) entry.getValue()); } else if (name.equals("include_global_state")) { @@ -542,17 +528,17 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest) entry.getValue()); } else if (name.equals("ignore_index_settings")) { @@ -561,13 +547,13 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest) entry.getValue()); } else { - throw new ElasticsearchIllegalArgumentException("malformed ignore_index_settings section, should be an array of strings"); + throw new IllegalArgumentException("malformed ignore_index_settings section, should be an array of strings"); } } else { - throw new ElasticsearchIllegalArgumentException("Unknown parameter " + name); + throw new IllegalArgumentException("Unknown parameter " + name); } } - indicesOptions(IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed)); + indicesOptions(IndicesOptions.fromMap((Map) source, IndicesOptions.lenientExpandOpen())); return this; } @@ -584,7 +570,7 @@ public class RestoreSnapshotRequest extends MasterNodeOperationRequest listener) throws ElasticsearchException { + protected void masterOperation(final RestoreSnapshotRequest request, ClusterState state, final ActionListener listener) { RestoreService.RestoreRequest restoreRequest = new RestoreService.RestoreRequest( "restore_snapshot[" + request.snapshot() + "]", request.repository(), request.snapshot(), request.indices(), request.indicesOptions(), request.renamePattern(), request.renameReplacement(), diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java index 38be8b24d42..efbc82c9b6a 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStage.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import org.elasticsearch.ElasticsearchIllegalArgumentException; /** */ @@ -92,7 +91,7 @@ public enum SnapshotIndexShardStage { case 4: return FAILURE; default: - throw new ElasticsearchIllegalArgumentException("No snapshot shard stage for value [" + value + "]"); + throw new IllegalArgumentException("No snapshot shard stage for value [" + value + "]"); } } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java index 4f2ee47e344..878ca704345 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java +++ 
b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotIndexShardStatus.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -75,7 +74,7 @@ public class SnapshotIndexShardStatus extends BroadcastShardOperationResponse im stage = SnapshotIndexShardStage.FAILURE; break; default: - throw new ElasticsearchIllegalArgumentException("Unknown stage type " + indexShardStatus.stage()); + throw new IllegalArgumentException("Unknown stage type " + indexShardStatus.stage()); } stats = new SnapshotStats(indexShardStatus); failure = indexShardStatus.failure(); diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java index 8eb9b35b040..ba3bd7a8c77 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotShardsStats.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; @@ -59,7 +58,7 @@ public class SnapshotShardsStats implements ToXContent { failedShards++; break; default: - throw new ElasticsearchIllegalArgumentException("Unknown stage type " + shard.getStage()); + throw new IllegalArgumentException("Unknown stage type " + shard.getStage()); } } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java index becaacd743b..b23010e3ecc 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java @@ -55,30 +55,16 @@ public class TransportNodesSnapshotsStatus extends TransportNodesOperationAction @Inject public TransportNodesSnapshotsStatus(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, SnapshotsService snapshotsService, ActionFilters actionFilters) { - super(settings, ACTION_NAME, clusterName, threadPool, clusterService, transportService, actionFilters); + super(settings, ACTION_NAME, clusterName, threadPool, clusterService, transportService, actionFilters, + Request.class, NodeRequest.class, ThreadPool.Names.GENERIC); this.snapshotsService = snapshotsService; } - @Override - protected String executor() { - return ThreadPool.Names.GENERIC; - } - @Override protected boolean transportCompress() { return true; // compress since the metadata can become large } - @Override - protected Request newRequestInstance() { - return new Request(); - } - - @Override - protected NodeRequest newNodeRequest() { - return new NodeRequest(); - } - @Override protected NodeRequest newNodeRequest(String nodeId, Request request) { return new 
NodeRequest(nodeId, request); @@ -108,7 +94,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesOperationAction } @Override - protected NodeSnapshotStatus nodeOperation(NodeRequest request) throws ElasticsearchException { + protected NodeSnapshotStatus nodeOperation(NodeRequest request) { ImmutableMap.Builder> snapshotMapBuilder = ImmutableMap.builder(); try { String nodeId = clusterService.localNode().id(); diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index 967ac3808ec..edfc9d5fd32 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.cluster.snapshots.status; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction; @@ -58,7 +57,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeOperation @Inject public TransportSnapshotsStatusAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, SnapshotsService snapshotsService, TransportNodesSnapshotsStatus transportNodesSnapshotsStatus, ActionFilters actionFilters) { - super(settings, SnapshotsStatusAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, SnapshotsStatusAction.NAME, transportService, clusterService, threadPool, actionFilters, SnapshotsStatusRequest.class); this.snapshotsService = snapshotsService; this.transportNodesSnapshotsStatus = transportNodesSnapshotsStatus; } @@ -70,12 +69,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeOperation @Override protected ClusterBlockException checkBlock(SnapshotsStatusRequest request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA); - } - - @Override - protected SnapshotsStatusRequest newRequest() { - return new SnapshotsStatusRequest(); + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); } @Override @@ -185,7 +179,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeOperation stage = SnapshotIndexShardStage.DONE; break; default: - throw new ElasticsearchIllegalArgumentException("Unknown snapshot state " + shardEntry.getValue().state()); + throw new IllegalArgumentException("Unknown snapshot state " + shardEntry.getValue().state()); } SnapshotIndexShardStatus shardStatus = new SnapshotIndexShardStatus(shardEntry.getKey(), stage); shardStatusBuilder.add(shardStatus); @@ -221,7 +215,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeOperation state = SnapshotMetaData.State.SUCCESS; break; default: - throw new ElasticsearchIllegalArgumentException("Unknown snapshot state " + snapshot.state()); + throw new IllegalArgumentException("Unknown snapshot state " + snapshot.state()); } builder.add(new SnapshotStatus(snapshotId, state, shardStatusBuilder.build())); } diff --git 
a/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java index 861a84a9e71..e9aa9b723fa 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/state/ClusterStateResponse.java @@ -62,6 +62,6 @@ public class ClusterStateResponse extends ActionResponse { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); clusterName.writeTo(out); - ClusterState.Builder.writeTo(clusterState, out); + clusterState.writeTo(out); } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index e3dedfe6252..5c8905fd97b 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.cluster.state; -import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; @@ -29,7 +28,6 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaData.Custom; @@ -39,11 +37,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.List; - -import static com.google.common.collect.Lists.newArrayList; -import static org.elasticsearch.cluster.metadata.MetaData.lookupFactorySafe; - /** * */ @@ -54,7 +47,7 @@ public class TransportClusterStateAction extends TransportMasterNodeReadOperatio @Inject public TransportClusterStateAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ClusterName clusterName, ActionFilters actionFilters) { - super(settings, ClusterStateAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, ClusterStateAction.NAME, transportService, clusterService, threadPool, actionFilters, ClusterStateRequest.class); this.clusterName = clusterName; } @@ -73,22 +66,18 @@ public class TransportClusterStateAction extends TransportMasterNodeReadOperatio return null; } - @Override - protected ClusterStateRequest newRequest() { - return new ClusterStateRequest(); - } - @Override protected ClusterStateResponse newResponse() { return new ClusterStateResponse(); } @Override - protected void masterOperation(final ClusterStateRequest request, final ClusterState state, ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final ClusterStateRequest request, final ClusterState state, ActionListener listener) { ClusterState currentState = clusterService.state(); logger.trace("Serving cluster state request using version {}", currentState.version()); ClusterState.Builder builder = 
ClusterState.builder(currentState.getClusterName()); builder.version(currentState.version()); + builder.uuid(currentState.uuid()); if (request.nodes()) { builder.nodes(currentState.nodes()); } @@ -127,10 +116,9 @@ public class TransportClusterStateAction extends TransportMasterNodeReadOperatio } // Filter our metadata that shouldn't be returned by API - for(ObjectCursor type : currentState.metaData().customs().keys()) { - Custom.Factory factory = lookupFactorySafe(type.value); - if(!factory.context().contains(MetaData.XContentContext.API)) { - mdBuilder.removeCustom(type.value); + for(ObjectObjectCursor custom : currentState.metaData().customs()) { + if(!custom.value.context().contains(MetaData.XContentContext.API)) { + mdBuilder.removeCustom(custom.key); } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java index 2f0b0e7d4f2..d2395abf5f8 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsIndices.java @@ -21,6 +21,7 @@ package org.elasticsearch.action.admin.cluster.stats; import com.carrotsearch.hppc.ObjectObjectOpenHashMap; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; + import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequest.java b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequest.java index 287ca7253f8..3a0c26af10c 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsRequest.java @@ -30,6 +30,9 @@ import java.io.IOException; */ public class ClusterStatsRequest extends NodesOperationRequest { + ClusterStatsRequest() { + } + /** * Get stats from nodes based on the nodes ids specified. If none are passed, stats * based on all nodes will be returned. 
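The cluster-state hunk above also changes how internal-only metadata sections are stripped from API responses: rather than looking up a Custom.Factory by type and asking the factory for its serialization contexts, each custom section now reports its own contexts, and anything not marked for the API context is removed before the state is returned. A stand-alone sketch of that filter under JDK-only assumptions; Custom and Context here are illustrative stand-ins, not the Elasticsearch types:

import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;

class CustomFilter {

    enum Context { API, GATEWAY, SNAPSHOT }

    interface Custom {
        EnumSet<Context> context(); // contexts this section may be serialized in
    }

    // Keep only the custom sections that declare themselves API-visible.
    static Map<String, Custom> apiView(Map<String, Custom> customs) {
        Map<String, Custom> filtered = new HashMap<>();
        for (Map.Entry<String, Custom> entry : customs.entrySet()) {
            if (entry.getValue().context().contains(Context.API)) {
                filtered.put(entry.getKey(), entry.getValue());
            }
        }
        return filtered;
    }
}

Carrying the context on the section itself removes the need for the parallel factory lookup (lookupFactorySafe) whose import the old code carried, and keeps the filtering loop a plain iteration over the map.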
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java index a123bccf6b8..7f7d00ad529 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -67,16 +67,12 @@ public class TransportClusterStatsAction extends TransportNodesOperationAction nodeStats = new ArrayList<>(responses.length()); @@ -90,16 +86,6 @@ public class TransportClusterStatsAction extends TransportNodesOperationAction shardsStats = new ArrayList<>(); diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java index 2e46a5af070..938f176e35c 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java +++ b/src/main/java/org/elasticsearch/action/admin/cluster/tasks/TransportPendingClusterTasksAction.java @@ -41,7 +41,7 @@ public class TransportPendingClusterTasksAction extends TransportMasterNodeReadO @Inject public TransportPendingClusterTasksAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, PendingClusterTasksAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, PendingClusterTasksAction.NAME, transportService, clusterService, threadPool, actionFilters, PendingClusterTasksRequest.class); this.clusterService = clusterService; } @@ -53,12 +53,7 @@ public class TransportPendingClusterTasksAction extends TransportMasterNodeReadO @Override protected ClusterBlockException checkBlock(PendingClusterTasksRequest request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA); - } - - @Override - protected PendingClusterTasksRequest newRequest() { - return new PendingClusterTasksRequest(); + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); } @Override @@ -67,7 +62,7 @@ public class TransportPendingClusterTasksAction extends TransportMasterNodeReadO } @Override - protected void masterOperation(PendingClusterTasksRequest request, ClusterState state, ActionListener listener) throws ElasticsearchException { + protected void masterOperation(PendingClusterTasksRequest request, ClusterState state, ActionListener listener) { listener.onResponse(new PendingClusterTasksResponse(clusterService.pendingTasks())); } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java index bd5934dbfbd..15cd244ba23 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/Alias.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.alias; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -187,7 +186,7 @@ public class Alias implements Streamable { String currentFieldName = null; XContentParser.Token token = parser.nextToken(); 
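// Another change repeated across these hunks: the single ClusterBlockLevel.METADATA
// is split into METADATA_READ and METADATA_WRITE. Read-only admin actions
// (cluster state, pending tasks, get aliases/repositories/snapshots) now check
// the READ level, while mutating actions (reroute, settings updates, alias
// changes, snapshot deletion) check WRITE, so a metadata-write block such as
// a read-only cluster no longer rejects reads. A minimal JDK-only sketch of
// the distinction; Blocks and Level are illustrative stand-ins for the
// Elasticsearch block machinery:

import java.util.EnumSet;

class Blocks {

    enum Level { METADATA_READ, METADATA_WRITE }

    private final EnumSet<Level> active = EnumSet.noneOf(Level.class);

    void add(Level level) { active.add(level); }

    boolean blocked(Level level) { return active.contains(level); }

    public static void main(String[] args) {
        Blocks blocks = new Blocks();
        blocks.add(Level.METADATA_WRITE); // e.g. a read-only cluster block
        System.out.println(blocks.blocked(Level.METADATA_READ));  // false: reads still served
        System.out.println(blocks.blocked(Level.METADATA_WRITE)); // true: writes rejected
    }
}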
if (token == null) { - throw new ElasticsearchIllegalArgumentException("No alias is specified"); + throw new IllegalArgumentException("No alias is specified"); } while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java index 360765c9358..e2bf884428a 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/TransportIndicesAliasesAction.java @@ -50,7 +50,7 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeOperationA @Inject public TransportIndicesAliasesAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataIndexAliasesService indexAliasesService, ActionFilters actionFilters) { - super(settings, IndicesAliasesAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, IndicesAliasesAction.NAME, transportService, clusterService, threadPool, actionFilters, IndicesAliasesRequest.class); this.indexAliasesService = indexAliasesService; } @@ -60,11 +60,6 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeOperationA return ThreadPool.Names.SAME; } - @Override - protected IndicesAliasesRequest newRequest() { - return new IndicesAliasesRequest(); - } - @Override protected IndicesAliasesResponse newResponse() { return new IndicesAliasesResponse(); @@ -78,11 +73,11 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeOperationA indices.add(index); } } - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, indices.toArray(new String[indices.size()])); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indices.toArray(new String[indices.size()])); } @Override - protected void masterOperation(final IndicesAliasesRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final IndicesAliasesRequest request, final ClusterState state, final ActionListener listener) { //Expand the indices names List actions = request.aliasActions(); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java index 327a5e16e0f..675d0d88922 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/exists/TransportAliasesExistAction.java @@ -38,7 +38,7 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadOperatio @Inject public TransportAliasesExistAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, AliasesExistAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, AliasesExistAction.NAME, transportService, clusterService, threadPool, actionFilters, GetAliasesRequest.class); } @Override @@ -49,12 +49,7 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadOperatio @Override protected ClusterBlockException 
checkBlock(GetAliasesRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); - } - - @Override - protected GetAliasesRequest newRequest() { - return new GetAliasesRequest(); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); } @Override @@ -63,7 +58,7 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadOperatio } @Override - protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener listener) throws ElasticsearchException { + protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener listener) { String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); boolean result = state.metaData().hasAliases(request.aliases(), concreteIndices); listener.onResponse(new AliasesExistResponse(result)); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java index 765a9395afc..106e864a367 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/GetAliasesResponse.java @@ -74,7 +74,7 @@ public class GetAliasesResponse extends ActionResponse { out.writeString(entry.key); out.writeVInt(entry.value.size()); for (AliasMetaData aliasMetaData : entry.value) { - AliasMetaData.Builder.writeTo(aliasMetaData, out); + aliasMetaData.writeTo(out); } } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java index dcb817f2191..095aee369aa 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/alias/get/TransportGetAliasesAction.java @@ -41,7 +41,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadOperationA @Inject public TransportGetAliasesAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, GetAliasesAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, GetAliasesAction.NAME, transportService, clusterService, threadPool, actionFilters, GetAliasesRequest.class); } @Override @@ -52,12 +52,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadOperationA @Override protected ClusterBlockException checkBlock(GetAliasesRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); - } - - @Override - protected GetAliasesRequest newRequest() { - return new GetAliasesRequest(); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); } @Override @@ -66,7 +61,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadOperationA } @Override - protected void masterOperation(GetAliasesRequest request, ClusterState 
state, ActionListener listener) throws ElasticsearchException { + protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener listener) { String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); @SuppressWarnings("unchecked") // ImmutableList to List results incompatible type ImmutableOpenMap> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), concreteIndices); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java b/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java index 405d79cbd0d..f1c9afe84a4 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportAnalyzeAction.java @@ -26,9 +26,7 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute; import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute; import org.apache.lucene.analysis.tokenattributes.TypeAttribute; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.single.custom.TransportSingleCustomOperationAction; import org.elasticsearch.cluster.ClusterService; @@ -47,8 +45,6 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.analysis.IndicesAnalysisService; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BaseTransportRequestHandler; -import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportService; import java.io.IOException; @@ -60,7 +56,6 @@ import java.util.List; public class TransportAnalyzeAction extends TransportSingleCustomOperationAction { private final IndicesService indicesService; - private final IndicesAnalysisService indicesAnalysisService; private static final Settings DEFAULT_SETTINGS = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(); @@ -68,20 +63,9 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction @Inject public TransportAnalyzeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService, IndicesAnalysisService indicesAnalysisService, ActionFilters actionFilters) { - super(settings, AnalyzeAction.NAME, threadPool, clusterService, transportService, actionFilters); + super(settings, AnalyzeAction.NAME, threadPool, clusterService, transportService, actionFilters, AnalyzeRequest.class, ThreadPool.Names.INDEX); this.indicesService = indicesService; this.indicesAnalysisService = indicesAnalysisService; - transportService.registerHandler(AnalyzeAction.NAME, new TransportHandler()); - } - - @Override - protected String executor() { - return ThreadPool.Names.INDEX; - } - - @Override - protected AnalyzeRequest newRequest() { - return new AnalyzeRequest(); } @Override @@ -112,7 +96,7 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction } @Override - protected AnalyzeResponse shardOperation(AnalyzeRequest request, ShardId shardId) throws ElasticsearchException { + protected AnalyzeResponse 
shardOperation(AnalyzeRequest request, ShardId shardId) { IndexService indexService = null; if (shardId != null) { indexService = indicesService.indexServiceSafe(shardId.getIndex()); @@ -122,12 +106,12 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction String field = null; if (request.field() != null) { if (indexService == null) { - throw new ElasticsearchIllegalArgumentException("No index provided, and trying to analyzer based on a specific field which requires the index parameter"); + throw new IllegalArgumentException("No index provided, and trying to analyze based on a specific field, which requires the index parameter"); } FieldMapper fieldMapper = indexService.mapperService().smartNameFieldMapper(request.field()); if (fieldMapper != null) { if (fieldMapper.isNumeric()) { - throw new ElasticsearchIllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are not supported on numeric fields"); + throw new IllegalArgumentException("Can't process field [" + request.field() + "], Analysis requests are not supported on numeric fields"); } analyzer = fieldMapper.indexAnalyzer(); field = fieldMapper.names().indexName(); @@ -148,20 +132,20 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction analyzer = indexService.analysisService().analyzer(request.analyzer()); } if (analyzer == null) { - throw new ElasticsearchIllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]"); + throw new IllegalArgumentException("failed to find analyzer [" + request.analyzer() + "]"); } } else if (request.tokenizer() != null) { TokenizerFactory tokenizerFactory; if (indexService == null) { TokenizerFactoryFactory tokenizerFactoryFactory = indicesAnalysisService.tokenizerFactoryFactory(request.tokenizer()); if (tokenizerFactoryFactory == null) { - throw new ElasticsearchIllegalArgumentException("failed to find global tokenizer under [" + request.tokenizer() + "]"); + throw new IllegalArgumentException("failed to find global tokenizer under [" + request.tokenizer() + "]"); } tokenizerFactory = tokenizerFactoryFactory.create(request.tokenizer(), DEFAULT_SETTINGS); } else { tokenizerFactory = indexService.analysisService().tokenizer(request.tokenizer()); if (tokenizerFactory == null) { - throw new ElasticsearchIllegalArgumentException("failed to find tokenizer under [" + request.tokenizer() + "]"); + throw new IllegalArgumentException("failed to find tokenizer under [" + request.tokenizer() + "]"); } } @@ -173,17 +157,17 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction if (indexService == null) { TokenFilterFactoryFactory tokenFilterFactoryFactory = indicesAnalysisService.tokenFilterFactoryFactory(tokenFilterName); if (tokenFilterFactoryFactory == null) { - throw new ElasticsearchIllegalArgumentException("failed to find global token filter under [" + tokenFilterName + "]"); + throw new IllegalArgumentException("failed to find global token filter under [" + tokenFilterName + "]"); } tokenFilterFactories[i] = tokenFilterFactoryFactory.create(tokenFilterName, DEFAULT_SETTINGS); } else { tokenFilterFactories[i] = indexService.analysisService().tokenFilter(tokenFilterName); if (tokenFilterFactories[i] == null) { - throw new ElasticsearchIllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]"); + throw new IllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]"); } } if (tokenFilterFactories[i] == null) { - throw
new ElasticsearchIllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]"); + throw new IllegalArgumentException("failed to find token filter under [" + tokenFilterName + "]"); } } } @@ -196,17 +180,17 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction if (indexService == null) { CharFilterFactoryFactory charFilterFactoryFactory = indicesAnalysisService.charFilterFactoryFactory(charFilterName); if (charFilterFactoryFactory == null) { - throw new ElasticsearchIllegalArgumentException("failed to find global char filter under [" + charFilterName + "]"); + throw new IllegalArgumentException("failed to find global char filter under [" + charFilterName + "]"); } charFilterFactories[i] = charFilterFactoryFactory.create(charFilterName, DEFAULT_SETTINGS); } else { charFilterFactories[i] = indexService.analysisService().charFilter(charFilterName); if (charFilterFactories[i] == null) { - throw new ElasticsearchIllegalArgumentException("failed to find token char under [" + charFilterName + "]"); + throw new IllegalArgumentException("failed to find char filter under [" + charFilterName + "]"); } } if (charFilterFactories[i] == null) { - throw new ElasticsearchIllegalArgumentException("failed to find token char under [" + charFilterName + "]"); + throw new IllegalArgumentException("failed to find char filter under [" + charFilterName + "]"); } } } @@ -221,7 +205,7 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction } } if (analyzer == null) { - throw new ElasticsearchIllegalArgumentException("failed to find analyzer"); + throw new IllegalArgumentException("failed to find analyzer"); } List tokens = Lists.newArrayList(); @@ -234,7 +218,7 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction OffsetAttribute offset = stream.addAttribute(OffsetAttribute.class); TypeAttribute type = stream.addAttribute(TypeAttribute.class); - int position = 0; + int position = -1; while (stream.incrementToken()) { int increment = posIncr.getPositionIncrement(); if (increment > 0) { @@ -260,44 +244,4 @@ public class TransportAnalyzeAction extends TransportSingleCustomOperationAction return new AnalyzeResponse(tokens); } - - private class TransportHandler extends BaseTransportRequestHandler { - - @Override - public AnalyzeRequest newInstance() { - return newRequest(); - } - - @Override - public void messageReceived(AnalyzeRequest request, final TransportChannel channel) throws Exception { - // no need to have a threaded listener since we just send back a response - request.listenerThreaded(false); - // if we have a local operation, execute it on a thread since we don't spawn - request.operationThreaded(true); - execute(request, new ActionListener() { - @Override - public void onResponse(AnalyzeResponse result) { - try { - channel.sendResponse(result); - } catch (Throwable e) { - onFailure(e); - } - } - - @Override - public void onFailure(Throwable e) { - try { - channel.sendResponse(e); - } catch (Exception e1) { - logger.warn("Failed to send response for get", e1); - } - } - }); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java index 59e4f3a8842..3a96c83b3ac 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java
+++ b/src/main/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheRequest.java @@ -37,7 +37,6 @@ public class ClearIndicesCacheRequest extends BroadcastOperationRequest 0) { - clearedAtLeastOne = true; - service.cache().filter().clear("api", request.filterKeys()); - } if (request.fieldDataCache()) { clearedAtLeastOne = true; if (request.fields() == null || request.fields().length == 0) { @@ -175,12 +155,12 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastOperatio @Override protected ClusterBlockException checkGlobalBlock(ClusterState state, ClearIndicesCacheRequest request) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA); + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); } @Override protected ClusterBlockException checkRequestBlock(ClusterState state, ClearIndicesCacheRequest request, String[] concreteIndices) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices); } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java index ae2fd4c1450..8c360590a64 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/close/TransportCloseIndexAction.java @@ -47,7 +47,7 @@ public class TransportCloseIndexAction extends TransportMasterNodeOperationActio @Inject public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataIndexStateService indexStateService, NodeSettingsService nodeSettingsService, ActionFilters actionFilters) { - super(settings, CloseIndexAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, CloseIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, CloseIndexRequest.class); this.indexStateService = indexStateService; this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService); } @@ -58,11 +58,6 @@ public class TransportCloseIndexAction extends TransportMasterNodeOperationActio return ThreadPool.Names.SAME; } - @Override - protected CloseIndexRequest newRequest() { - return new CloseIndexRequest(); - } - @Override protected CloseIndexResponse newResponse() { return new CloseIndexResponse(); @@ -76,11 +71,11 @@ public class TransportCloseIndexAction extends TransportMasterNodeOperationActio @Override protected ClusterBlockException checkBlock(CloseIndexRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); } @Override - protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener listener) { final String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); 
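// Editor's note, as a hedged sketch: the METADATA -> METADATA_READ / METADATA_WRITE
// substitutions running through this patch split the old single metadata block level
// by intent. Read-only actions (alias/mapping/settings GETs, stats) now check
// METADATA_READ, while mutating actions such as this close-index path check
// METADATA_WRITE, so a metadata-write block no longer rejects harmless reads.
// The enum shape this implies is roughly:
//
//     public enum ClusterBlockLevel {
//         READ, WRITE, METADATA_READ, METADATA_WRITE
//     }
//
// (the real ClusterBlockLevel carries additional details, e.g. serialization ids,
// not shown here).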
CloseIndexClusterStateUpdateRequest updateRequest = new CloseIndexClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index 67a23d9675b..60a265de785 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.indices.create; import com.google.common.base.Charsets; import com.google.common.collect.Sets; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; @@ -106,14 +105,6 @@ public class CreateIndexRequest extends AcknowledgedRequest if (index == null) { validationException = addValidationError("index is missing", validationException); } - Integer number_of_primaries = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null); - Integer number_of_replicas = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null); - if (number_of_primaries != null && number_of_primaries <= 0) { - validationException = addValidationError("index must have 1 or more primary shards", validationException); - } - if (number_of_replicas != null && number_of_replicas < 0) { - validationException = addValidationError("index must have 0 or more replica shards", validationException); - } return validationException; } @@ -247,7 +238,7 @@ public class CreateIndexRequest extends AcknowledgedRequest try { mappings.put(type, source.string()); } catch (IOException e) { - throw new ElasticsearchIllegalArgumentException("Failed to build json for mapping request", e); + throw new IllegalArgumentException("Failed to build json for mapping request", e); } return this; } @@ -405,11 +396,11 @@ public class CreateIndexRequest extends AcknowledgedRequest aliases((Map) entry.getValue()); } else { // maybe custom? 
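// Editor's note, as a hedged sketch: the hunk below is one instance of the
// factory-to-prototype change applied throughout this patch. Instead of looking up
// a static Custom.Factory to parse custom index metadata, callers fetch a registered
// prototype instance and delegate to it, roughly:
//
//     IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name); // may be null
//     if (proto != null) {
//         customs.put(name, proto.fromMap((Map<String, Object>) entry.getValue()));
//     }
//
// which is also why the stream code later in this file can simply call
// entry.getValue().writeTo(out) on the instance itself instead of going through
// IndexMetaData.lookupFactorySafe(...).writeTo(value, out).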
- IndexMetaData.Custom.Factory factory = IndexMetaData.lookupFactory(name); - if (factory != null) { + IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name); + if (proto != null) { found = true; try { - customs.put(name, factory.fromMap((Map) entry.getValue())); + customs.put(name, proto.fromMap((Map) entry.getValue())); } catch (IOException e) { throw new ElasticsearchParseException("failed to parse custom metadata for [" + name + "]"); } @@ -457,7 +448,7 @@ public class CreateIndexRequest extends AcknowledgedRequest int customSize = in.readVInt(); for (int i = 0; i < customSize; i++) { String type = in.readString(); - IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupFactorySafe(type).readFrom(in); + IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in); customs.put(type, customIndexMetaData); } int aliasesSize = in.readVInt(); @@ -481,7 +472,7 @@ public class CreateIndexRequest extends AcknowledgedRequest out.writeVInt(customs.size()); for (Map.Entry entry : customs.entrySet()) { out.writeString(entry.getKey()); - IndexMetaData.lookupFactorySafe(entry.getKey()).writeTo(entry.getValue(), out); + entry.getValue().writeTo(out); } out.writeVInt(aliases.size()); for (Alias alias : aliases) { diff --git a/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java index 3a283d68041..18bca510a6e 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/create/TransportCreateIndexAction.java @@ -45,7 +45,7 @@ public class TransportCreateIndexAction extends TransportMasterNodeOperationActi @Inject public TransportCreateIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataCreateIndexService createIndexService, ActionFilters actionFilters) { - super(settings, CreateIndexAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, CreateIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, CreateIndexRequest.class); this.createIndexService = createIndexService; } @@ -55,11 +55,6 @@ public class TransportCreateIndexAction extends TransportMasterNodeOperationActi return ThreadPool.Names.SAME; } - @Override - protected CreateIndexRequest newRequest() { - return new CreateIndexRequest(); - } - @Override protected CreateIndexResponse newResponse() { return new CreateIndexResponse(); @@ -67,11 +62,11 @@ public class TransportCreateIndexAction extends TransportMasterNodeOperationActi @Override protected ClusterBlockException checkBlock(CreateIndexRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, request.index()); + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, request.index()); } @Override - protected void masterOperation(final CreateIndexRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final CreateIndexRequest request, final ClusterState state, final ActionListener listener) { String cause = request.cause(); if (cause.length() == 0) { cause = "api"; diff --git a/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java 
b/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java index 6272926cce7..4fa760c7a9e 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/delete/TransportDeleteIndexAction.java @@ -48,7 +48,7 @@ public class TransportDeleteIndexAction extends TransportMasterNodeOperationActi public TransportDeleteIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataDeleteIndexService deleteIndexService, NodeSettingsService nodeSettingsService, ActionFilters actionFilters) { - super(settings, DeleteIndexAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, DeleteIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, DeleteIndexRequest.class); this.deleteIndexService = deleteIndexService; this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService); } @@ -58,11 +58,6 @@ public class TransportDeleteIndexAction extends TransportMasterNodeOperationActi return ThreadPool.Names.SAME; } - @Override - protected DeleteIndexRequest newRequest() { - return new DeleteIndexRequest(); - } - @Override protected DeleteIndexResponse newResponse() { return new DeleteIndexResponse(); @@ -76,11 +71,11 @@ public class TransportDeleteIndexAction extends TransportMasterNodeOperationActi @Override protected ClusterBlockException checkBlock(DeleteIndexRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); } @Override - protected void masterOperation(final DeleteIndexRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final DeleteIndexRequest request, final ClusterState state, final ActionListener listener) { String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); if (concreteIndices.length == 0) { listener.onResponse(new DeleteIndexResponse(true)); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java index c5b533df1b7..e104090e962 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/indices/IndicesExistsRequest.java @@ -37,6 +37,11 @@ public class IndicesExistsRequest extends MasterNodeReadOperationRequest listener) throws ElasticsearchException { + protected void masterOperation(final IndicesExistsRequest request, final ClusterState state, final ActionListener listener) { boolean exists; try { // Similar as the previous behaviour, but now also aliases and wildcards are supported. 
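The constructor change above repeats in nearly every transport action touched by this patch: the concrete request class is now handed to the base-class constructor, which can instantiate requests reflectively, and the hand-written newRequest() override disappears. A minimal sketch of the receiving side, assuming the base class stores the Class and constructs one request per incoming message (class and field names here are illustrative, not taken from the patch; the real TransportMasterNodeOperationAction also wires thread pools, action filters, and cluster-state handling):

// Hedged sketch only, not the actual base-class implementation.
public abstract class SketchTransportAction<Request, Response> {

    private final String actionName;
    private final Class<Request> requestClass; // now supplied by every concrete action

    protected SketchTransportAction(String actionName, Class<Request> requestClass) {
        this.actionName = actionName;
        this.requestClass = requestClass;
    }

    // One reflective construction point replaces the many per-action
    // newRequest() overrides deleted throughout this patch.
    protected final Request newRequest() {
        try {
            return requestClass.getDeclaredConstructor().newInstance();
        } catch (ReflectiveOperationException e) {
            throw new IllegalStateException("failed to instantiate [" + actionName + "] request " + requestClass, e);
        }
    }
}

Each subclass then reduces to a super(settings, ..., FooRequest.class) call plus its operation body, which is exactly the shape of TransportTypesExistsAction in the next hunk.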
diff --git a/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java index b7d10299204..ced21bd98e0 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/exists/types/TransportTypesExistsAction.java @@ -41,7 +41,7 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadOperation @Inject public TransportTypesExistsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, TypesExistsAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, TypesExistsAction.NAME, transportService, clusterService, threadPool, actionFilters, TypesExistsRequest.class); } @Override @@ -50,11 +50,6 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadOperation return ThreadPool.Names.SAME; } - @Override - protected TypesExistsRequest newRequest() { - return new TypesExistsRequest(); - } - @Override protected TypesExistsResponse newResponse() { return new TypesExistsResponse(); @@ -62,11 +57,11 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadOperation @Override protected ClusterBlockException checkBlock(TypesExistsRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); } @Override - protected void masterOperation(final TypesExistsRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final TypesExistsRequest request, final ClusterState state, final ActionListener listener) { String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); if (concreteIndices.length == 0) { listener.onResponse(new TypesExistsResponse(false)); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java b/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java index 94fe3a41cae..1d91fd6d800 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportFlushAction.java @@ -52,20 +52,11 @@ public class TransportFlushAction extends TransportBroadcastOperationAction { return this.validNames.contains(name); } - public static Feature fromName(String name) throws ElasticsearchIllegalArgumentException { + public static Feature fromName(String name) { for (Feature feature : Feature.values()) { if (feature.validName(name)) { return feature; } } - throw new ElasticsearchIllegalArgumentException("No feature for name [" + name + "]"); + throw new IllegalArgumentException("No feature for name [" + name + "]"); } - public static Feature fromId(byte id) throws ElasticsearchIllegalArgumentException { + public static Feature fromId(byte id) { if (id < 0 || id >= FEATURES.length) { - throw new ElasticsearchIllegalArgumentException("No mapping for id [" + id + "]"); + throw new IllegalArgumentException("No 
mapping for id [" + id + "]"); } return FEATURES[id]; } @@ -104,7 +103,7 @@ public class GetIndexRequest extends ClusterInfoRequest { public GetIndexRequest features(Feature... features) { if (features == null) { - throw new ElasticsearchIllegalArgumentException("features cannot be null"); + throw new IllegalArgumentException("features cannot be null"); } else { this.features = features; } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java index 35e6cfa4804..7080a694a11 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java @@ -134,7 +134,7 @@ public class GetIndexResponse extends ActionResponse { int valueSize = in.readVInt(); ImmutableOpenMap.Builder mappingEntryBuilder = ImmutableOpenMap.builder(); for (int j = 0; j < valueSize; j++) { - mappingEntryBuilder.put(in.readString(), MappingMetaData.readFrom(in)); + mappingEntryBuilder.put(in.readString(), MappingMetaData.PROTO.readFrom(in)); } mappingsMapBuilder.put(key, mappingEntryBuilder.build()); } @@ -181,7 +181,7 @@ public class GetIndexResponse extends ActionResponse { out.writeVInt(indexEntry.value.size()); for (ObjectObjectCursor mappingEntry : indexEntry.value) { out.writeString(mappingEntry.key); - MappingMetaData.writeTo(mappingEntry.value, out); + mappingEntry.value.writeTo(out); } } out.writeVInt(aliases.size()); @@ -189,7 +189,7 @@ public class GetIndexResponse extends ActionResponse { out.writeString(indexEntry.key); out.writeVInt(indexEntry.value.size()); for (AliasMetaData aliasEntry : indexEntry.value) { - AliasMetaData.Builder.writeTo(aliasEntry, out); + aliasEntry.writeTo(out); } } out.writeVInt(settings.size()); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java index 6c0f1271378..db9bfe99133 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/get/TransportGetIndexAction.java @@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.indices.get; import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; import org.elasticsearch.action.support.ActionFilters; @@ -49,7 +48,7 @@ public class TransportGetIndexAction extends TransportClusterInfoAction listener) throws ElasticsearchException { + final ActionListener listener) { ImmutableOpenMap> warmersResult = ImmutableOpenMap.of(); ImmutableOpenMap> mappingsResult = ImmutableOpenMap.of(); ImmutableOpenMap> aliasesResult = ImmutableOpenMap.of(); @@ -117,7 +111,7 @@ public class TransportGetIndexAction extends TransportClusterInfoAction typeMapBuilder = ImmutableOpenMap.builder(); for (int j = 0; j < valueSize; j++) { - typeMapBuilder.put(in.readString(), MappingMetaData.readFrom(in)); + typeMapBuilder.put(in.readString(), MappingMetaData.PROTO.readFrom(in)); } indexMapBuilder.put(key, typeMapBuilder.build()); } @@ -75,7 +75,7 @@ public class GetMappingsResponse extends ActionResponse { out.writeVInt(indexEntry.value.size()); for (ObjectObjectCursor typeEntry : indexEntry.value) { 
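// Editor's note (hedged): this loop shows the other half of the prototype change.
// Reads above now go through a shared canonical instance,
// MappingMetaData.PROTO.readFrom(in), which materializes a fresh object from the
// stream; writes below become plain instance calls, typeEntry.value.writeTo(out),
// instead of the old static MappingMetaData.writeTo(value, out) helper.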
out.writeString(typeEntry.key); - MappingMetaData.writeTo(typeEntry.value, out); + typeEntry.value.writeTo(out); } } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java index 4ec4b7c3a59..ee4a6d6f076 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetFieldMappingsAction.java @@ -30,7 +30,6 @@ import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BaseTransportRequestHandler; import org.elasticsearch.transport.TransportChannel; import org.elasticsearch.transport.TransportService; @@ -46,7 +45,7 @@ public class TransportGetFieldMappingsAction extends HandledTransportAction typeIntersection; @@ -125,13 +121,13 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleCustomO } @Override - protected GetFieldMappingsIndexRequest newRequest() { - return new GetFieldMappingsIndexRequest(); + protected GetFieldMappingsResponse newResponse() { + return new GetFieldMappingsResponse(); } @Override - protected GetFieldMappingsResponse newResponse() { - return new GetFieldMappingsResponse(); + protected ClusterBlockException checkRequestBlock(ClusterState state, InternalRequest request) { + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_READ, request.concreteIndex()); } private static final ToXContent.Params includeDefaultsParams = new ToXContent.Params() { @@ -177,7 +173,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleCustomO } }; - private ImmutableMap findFieldMappingsByType(DocumentMapper documentMapper, GetFieldMappingsIndexRequest request) throws ElasticsearchException { + private ImmutableMap findFieldMappingsByType(DocumentMapper documentMapper, GetFieldMappingsIndexRequest request) { MapBuilder fieldMappings = new MapBuilder<>(); final DocumentFieldMappers allFieldMappers = documentMapper.mappers(); for (String field : request.fields()) { diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java index bbd3c5bafcb..9867aeaf80c 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/TransportGetMappingsAction.java @@ -40,7 +40,7 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction listener) throws ElasticsearchException { + protected void doMasterOperation(final GetMappingsRequest request, String[] concreteIndices, final ClusterState state, final ActionListener listener) { logger.trace("serving getMapping request based on version {}", state.version()); ImmutableOpenMap> result = state.metaData().findMappings( concreteIndices, request.types() diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 7920faa67b2..a2f28a9bcfc 100644 --- 
a/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.indices.mapping.put; import com.carrotsearch.hppc.ObjectOpenHashSet; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -170,7 +169,7 @@ public class PutMappingRequest extends AcknowledgedRequest im for (String s : s1) { String[] s2 = Strings.split(s, "="); if (s2.length != 2) { - throw new ElasticsearchIllegalArgumentException("malformed " + s); + throw new IllegalArgumentException("malformed " + s); } builder.field(s2[0], s2[1]); } @@ -190,7 +189,7 @@ public class PutMappingRequest extends AcknowledgedRequest im for (String s : s1) { String[] s2 = Strings.split(s, "="); if (s2.length != 2) { - throw new ElasticsearchIllegalArgumentException("malformed " + s); + throw new IllegalArgumentException("malformed " + s); } builder.field(s2[0], s2[1]); } @@ -203,7 +202,7 @@ public class PutMappingRequest extends AcknowledgedRequest im builder.endObject(); return builder; } catch (Exception e) { - throw new ElasticsearchIllegalArgumentException("failed to generate simplified mapping definition", e); + throw new IllegalArgumentException("failed to generate simplified mapping definition", e); } } @@ -214,7 +213,7 @@ public class PutMappingRequest extends AcknowledgedRequest im try { return source(mappingBuilder.string()); } catch (IOException e) { - throw new ElasticsearchIllegalArgumentException("Failed to build json for mapping request", e); + throw new IllegalArgumentException("Failed to build json for mapping request", e); } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java index eed118f4b23..dffb1780423 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/TransportPutMappingAction.java @@ -44,7 +44,7 @@ public class TransportPutMappingAction extends TransportMasterNodeOperationActio @Inject public TransportPutMappingAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataMappingService metaDataMappingService, ActionFilters actionFilters) { - super(settings, PutMappingAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, PutMappingAction.NAME, transportService, clusterService, threadPool, actionFilters, PutMappingRequest.class); this.metaDataMappingService = metaDataMappingService; } @@ -54,11 +54,6 @@ public class TransportPutMappingAction extends TransportMasterNodeOperationActio return ThreadPool.Names.SAME; } - @Override - protected PutMappingRequest newRequest() { - return new PutMappingRequest(); - } - @Override protected PutMappingResponse newResponse() { return new PutMappingResponse(); @@ -66,11 +61,11 @@ public class TransportPutMappingAction extends TransportMasterNodeOperationActio @Override protected ClusterBlockException checkBlock(PutMappingRequest request, ClusterState state) { - return 
state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices())); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices())); } @Override - protected void masterOperation(final PutMappingRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final PutMappingRequest request, final ClusterState state, final ActionListener listener) { final String[] concreteIndices = clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices()); PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) diff --git a/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java b/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java index 729f12b15f0..7a74cc123e1 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/open/TransportOpenIndexAction.java @@ -47,7 +47,7 @@ public class TransportOpenIndexAction extends TransportMasterNodeOperationAction @Inject public TransportOpenIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataIndexStateService indexStateService, NodeSettingsService nodeSettingsService, ActionFilters actionFilters) { - super(settings, OpenIndexAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, OpenIndexAction.NAME, transportService, clusterService, threadPool, actionFilters, OpenIndexRequest.class); this.indexStateService = indexStateService; this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService); } @@ -58,11 +58,6 @@ public class TransportOpenIndexAction extends TransportMasterNodeOperationAction return ThreadPool.Names.SAME; } - @Override - protected OpenIndexRequest newRequest() { - return new OpenIndexRequest(); - } - @Override protected OpenIndexResponse newResponse() { return new OpenIndexResponse(); @@ -76,11 +71,11 @@ public class TransportOpenIndexAction extends TransportMasterNodeOperationAction @Override protected ClusterBlockException checkBlock(OpenIndexRequest request, ClusterState state) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); } @Override - protected void masterOperation(final OpenIndexRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final OpenIndexRequest request, final ClusterState state, final ActionListener listener) { final String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); OpenIndexClusterStateUpdateRequest updateRequest = new OpenIndexClusterStateUpdateRequest() .ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout()) diff --git 
a/src/main/java/org/elasticsearch/action/admin/indices/optimize/TransportOptimizeAction.java b/src/main/java/org/elasticsearch/action/admin/indices/optimize/TransportOptimizeAction.java index cc9285aad92..3c99a909e19 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/optimize/TransportOptimizeAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/optimize/TransportOptimizeAction.java @@ -53,20 +53,11 @@ public class TransportOptimizeAction extends TransportBroadcastOperationAction { +public class TransportRecoveryAction extends TransportBroadcastOperationAction { private final IndicesService indicesService; @Inject public TransportRecoveryAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService, ActionFilters actionFilters) { - - super(settings, RecoveryAction.NAME, threadPool, clusterService, transportService, actionFilters); + super(settings, RecoveryAction.NAME, threadPool, clusterService, transportService, actionFilters, + RecoveryRequest.class, ShardRecoveryRequest.class, ThreadPool.Names.MANAGEMENT); this.indicesService = indicesService; } - @Override - protected String executor() { - return ThreadPool.Names.MANAGEMENT; - } - - @Override - protected RecoveryRequest newRequestInstance() { - return new RecoveryRequest(); - } - @Override protected RecoveryResponse newResponse(RecoveryRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) { @@ -120,14 +109,8 @@ public class TransportRecoveryAction extends } } - RecoveryResponse response = new RecoveryResponse(shardsResponses.length(), successfulShards, + return new RecoveryResponse(shardsResponses.length(), successfulShards, failedShards, request.detailed(), shardResponses, shardFailures); - return response; - } - - @Override - protected ShardRecoveryRequest newShardRequest() { - return new ShardRecoveryRequest(); } @Override @@ -141,7 +124,7 @@ public class TransportRecoveryAction extends } @Override - protected ShardRecoveryResponse shardOperation(ShardRecoveryRequest request) throws ElasticsearchException { + protected ShardRecoveryResponse shardOperation(ShardRecoveryRequest request) { IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexShard indexShard = indexService.shardSafe(request.shardId().id()); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java b/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java index ca4492da715..713a14bad47 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/refresh/TransportRefreshAction.java @@ -53,20 +53,11 @@ public class TransportRefreshAction extends TransportBroadcastOperationAction listener) throws ElasticsearchException { + protected void masterOperation(GetSettingsRequest request, ClusterState state, ActionListener listener) { String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); ImmutableOpenMap.Builder indexToSettingsBuilder = ImmutableOpenMap.builder(); for (String concreteIndex : concreteIndices) { diff --git a/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java index 34af6b875e8..08195d17e38 100644 --- 
a/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/settings/put/TransportUpdateSettingsAction.java @@ -45,7 +45,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeOperationA @Inject public TransportUpdateSettingsAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataUpdateSettingsService updateSettingsService, ActionFilters actionFilters) { - super(settings, UpdateSettingsAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, UpdateSettingsAction.NAME, transportService, clusterService, threadPool, actionFilters, UpdateSettingsRequest.class); this.updateSettingsService = updateSettingsService; } @@ -58,19 +58,14 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeOperationA @Override protected ClusterBlockException checkBlock(UpdateSettingsRequest request, ClusterState state) { // allow for dedicated changes to the metadata blocks, so we don't block those to allow to "re-enable" it - ClusterBlockException globalBlock = state.blocks().globalBlockedException(ClusterBlockLevel.METADATA); + ClusterBlockException globalBlock = state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); if (globalBlock != null) { return globalBlock; } if (request.settings().getAsMap().size() == 1 && (request.settings().get(IndexMetaData.SETTING_BLOCKS_METADATA) != null || request.settings().get(IndexMetaData.SETTING_READ_ONLY) != null )) { return null; } - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); - } - - @Override - protected UpdateSettingsRequest newRequest() { - return new UpdateSettingsRequest(); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, state.metaData().concreteIndices(request.indicesOptions(), request.indices())); } @Override @@ -79,7 +74,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeOperationA } @Override - protected void masterOperation(final UpdateSettingsRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final UpdateSettingsRequest request, final ClusterState state, final ActionListener listener) { final String[] concreteIndices = clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices()); UpdateSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpdateSettingsClusterStateUpdateRequest() .indices(concreteIndices) diff --git a/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java b/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java index d247def46af..4561c80cfd4 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/stats/TransportIndicesStatsAction.java @@ -60,20 +60,11 @@ public class TransportIndicesStatsAction extends TransportBroadcastOperationActi @Inject public TransportIndicesStatsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService, ActionFilters actionFilters) { - super(settings, IndicesStatsAction.NAME, threadPool, clusterService, 
transportService, actionFilters); + super(settings, IndicesStatsAction.NAME, threadPool, clusterService, transportService, actionFilters, + IndicesStatsRequest.class, IndexShardStatsRequest.class, ThreadPool.Names.MANAGEMENT); this.indicesService = indicesService; } - @Override - protected String executor() { - return ThreadPool.Names.MANAGEMENT; - } - - @Override - protected IndicesStatsRequest newRequestInstance() { - return new IndicesStatsRequest(); - } - /** * Status goes across *all* shards. */ @@ -84,12 +75,12 @@ public class TransportIndicesStatsAction extends TransportBroadcastOperationActi @Override protected ClusterBlockException checkGlobalBlock(ClusterState state, IndicesStatsRequest request) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA); + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); } @Override protected ClusterBlockException checkRequestBlock(ClusterState state, IndicesStatsRequest request, String[] concreteIndices) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, concreteIndices); } @@ -117,11 +108,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastOperationActi return new IndicesStatsResponse(shards.toArray(new ShardStats[shards.size()]), clusterState, shardsResponses.length(), successfulShards, failedShards, shardFailures); } - @Override - protected IndexShardStatsRequest newShardRequest() { - return new IndexShardStatsRequest(); - } - @Override protected IndexShardStatsRequest newShardRequest(int numShards, ShardRouting shard, IndicesStatsRequest request) { return new IndexShardStatsRequest(shard.shardId(), request); @@ -133,7 +119,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastOperationActi } @Override - protected ShardStats shardOperation(IndexShardStatsRequest request) throws ElasticsearchException { + protected ShardStats shardOperation(IndexShardStatsRequest request) { IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexShard indexShard = indexService.shardSafe(request.shardId().id()); // if we don't have the routing entry yet, we need it stats wise, we treat it as if the shard is not ready yet diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java b/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java index 9fa695683e0..4e6fe81b7fd 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/delete/TransportDeleteIndexTemplateAction.java @@ -42,7 +42,7 @@ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeOpera @Inject public TransportDeleteIndexTemplateAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataIndexTemplateService indexTemplateService, ActionFilters actionFilters) { - super(settings, DeleteIndexTemplateAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, DeleteIndexTemplateAction.NAME, transportService, clusterService, threadPool, actionFilters, DeleteIndexTemplateRequest.class); this.indexTemplateService = indexTemplateService; } @@ -52,11 +52,6 @@ public class TransportDeleteIndexTemplateAction 
extends TransportMasterNodeOpera return ThreadPool.Names.SAME; } - @Override - protected DeleteIndexTemplateRequest newRequest() { - return new DeleteIndexTemplateRequest(); - } - @Override protected DeleteIndexTemplateResponse newResponse() { return new DeleteIndexTemplateResponse(); @@ -64,11 +59,11 @@ public class TransportDeleteIndexTemplateAction extends TransportMasterNodeOpera @Override protected ClusterBlockException checkBlock(DeleteIndexTemplateRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, ""); + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, ""); } @Override - protected void masterOperation(final DeleteIndexTemplateRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final DeleteIndexTemplateRequest request, final ClusterState state, final ActionListener listener) { indexTemplateService.removeTemplates(new MetaDataIndexTemplateService.RemoveRequest(request.name()).masterTimeout(request.masterNodeTimeout()), new MetaDataIndexTemplateService.RemoveListener() { @Override public void onResponse(MetaDataIndexTemplateService.RemoveResponse response) { diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java index 56de19872f2..2ce6d8d2c1a 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java @@ -60,7 +60,7 @@ public class GetIndexTemplatesResponse extends ActionResponse { super.writeTo(out); out.writeVInt(indexTemplates.size()); for (IndexTemplateMetaData indexTemplate : indexTemplates) { - IndexTemplateMetaData.Builder.writeTo(indexTemplate, out); + indexTemplate.writeTo(out); } } } diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java b/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java index 32ed8cb213b..5ff737c4d28 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/get/TransportGetIndexTemplatesAction.java @@ -44,7 +44,7 @@ public class TransportGetIndexTemplatesAction extends TransportMasterNodeReadOpe @Inject public TransportGetIndexTemplatesAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, GetIndexTemplatesAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, GetIndexTemplatesAction.NAME, transportService, clusterService, threadPool, actionFilters, GetIndexTemplatesRequest.class); } @Override @@ -54,12 +54,7 @@ public class TransportGetIndexTemplatesAction extends TransportMasterNodeReadOpe @Override protected ClusterBlockException checkBlock(GetIndexTemplatesRequest request, ClusterState state) { - return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA); - } - - @Override - protected GetIndexTemplatesRequest newRequest() { - return new GetIndexTemplatesRequest(); + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); } @Override @@ -68,7 +63,7 @@ public 
class TransportGetIndexTemplatesAction extends TransportMasterNodeReadOpe } @Override - protected void masterOperation(GetIndexTemplatesRequest request, ClusterState state, ActionListener listener) throws ElasticsearchException { + protected void masterOperation(GetIndexTemplatesRequest request, ClusterState state, ActionListener listener) { List results; // If we did not ask for a specific name, then we return all templates diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index b728abf934e..1b752855c20 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.template.put; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -215,7 +214,7 @@ public class PutIndexTemplateRequest extends MasterNodeOperationRequest) entry.getValue()); } else if (name.equals("mappings")) { Map mappings = (Map) entry.getValue(); for (Map.Entry entry1 : mappings.entrySet()) { if (!(entry1.getValue() instanceof Map)) { - throw new ElasticsearchIllegalArgumentException("Malformed mappings section for type [" + entry1.getKey() + "], should include an inner object describing the mapping"); + throw new IllegalArgumentException("Malformed mappings section for type [" + entry1.getKey() + "], should include an inner object describing the mapping"); } mapping(entry1.getKey(), (Map) entry1.getValue()); } @@ -293,10 +292,10 @@ public class PutIndexTemplateRequest extends MasterNodeOperationRequest) entry.getValue()); } else { // maybe custom? 
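The lines that follow swap IndexMetaData.lookupFactory for IndexMetaData.lookupPrototype: instead of maintaining a parallel Custom.Factory hierarchy, a registered instance of each custom metadata type acts as the factory for its own kind, which is also why the writeTo hunks in this file (and in GetIndexTemplatesResponse above) now call writeTo on the value itself rather than on a static helper. A minimal sketch of that prototype-registry shape, with hypothetical types that only gesture at the real IndexMetaData.Custom interface:

    import java.util.HashMap;
    import java.util.Map;

    interface Custom {
        // the prototype doubles as the parser/factory for its own type
        Custom fromMap(Map<String, Object> map);
    }

    final class PrototypeRegistry {
        private static final Map<String, Custom> PROTOTYPES = new HashMap<String, Custom>();

        static void register(String type, Custom prototype) {
            PROTOTYPES.put(type, prototype);
        }

        // lookupPrototype-style access; null signals an unregistered custom section
        static Custom lookupPrototype(String type) {
            return PROTOTYPES.get(type);
        }
    }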
- IndexMetaData.Custom.Factory factory = IndexMetaData.lookupFactory(name); - if (factory != null) { + IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(name); + if (proto != null) { try { - customs.put(name, factory.fromMap((Map) entry.getValue())); + customs.put(name, proto.fromMap((Map) entry.getValue())); } catch (IOException e) { throw new ElasticsearchParseException("failed to parse custom metadata for [" + name + "]"); } @@ -313,7 +312,7 @@ public class PutIndexTemplateRequest extends MasterNodeOperationRequest entry : customs.entrySet()) { out.writeString(entry.getKey()); - IndexMetaData.lookupFactorySafe(entry.getKey()).writeTo(entry.getValue(), out); + entry.getValue().writeTo(out); } out.writeVInt(aliases.size()); for (Alias alias : aliases) { diff --git a/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java b/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java index 766fa2349d7..d0b771262c2 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/template/put/TransportPutIndexTemplateAction.java @@ -42,7 +42,7 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeOperatio @Inject public TransportPutIndexTemplateAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataIndexTemplateService indexTemplateService, ActionFilters actionFilters) { - super(settings, PutIndexTemplateAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, PutIndexTemplateAction.NAME, transportService, clusterService, threadPool, actionFilters, PutIndexTemplateRequest.class); this.indexTemplateService = indexTemplateService; } @@ -52,11 +52,6 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeOperatio return ThreadPool.Names.SAME; } - @Override - protected PutIndexTemplateRequest newRequest() { - return new PutIndexTemplateRequest(); - } - @Override protected PutIndexTemplateResponse newResponse() { return new PutIndexTemplateResponse(); @@ -64,11 +59,11 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeOperatio @Override protected ClusterBlockException checkBlock(PutIndexTemplateRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA, ""); + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, ""); } @Override - protected void masterOperation(final PutIndexTemplateRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final PutIndexTemplateRequest request, final ClusterState state, final ActionListener listener) { String cause = request.cause(); if (cause.length() == 0) { cause = "api"; diff --git a/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java index 9325ba9f87a..99bb447bed8 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/validate/query/TransportValidateQueryAction.java @@ -76,7 +76,8 @@ public class TransportValidateQueryAction extends TransportBroadcastOperationAct 
@Inject public TransportValidateQueryAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ActionFilters actionFilters) { - super(settings, ValidateQueryAction.NAME, threadPool, clusterService, transportService, actionFilters); + super(settings, ValidateQueryAction.NAME, threadPool, clusterService, transportService, actionFilters, + ValidateQueryRequest.class, ShardValidateQueryRequest.class, ThreadPool.Names.SEARCH); this.indicesService = indicesService; this.scriptService = scriptService; this.pageCacheRecycler = pageCacheRecycler; @@ -89,21 +90,6 @@ public class TransportValidateQueryAction extends TransportBroadcastOperationAct super.doExecute(request, listener); } - @Override - protected String executor() { - return ThreadPool.Names.SEARCH; - } - - @Override - protected ValidateQueryRequest newRequestInstance() { - return new ValidateQueryRequest(); - } - - @Override - protected ShardValidateQueryRequest newShardRequest() { - return new ShardValidateQueryRequest(); - } - @Override protected ShardValidateQueryRequest newShardRequest(int numShards, ShardRouting shard, ValidateQueryRequest request) { String[] filteringAliases = clusterService.state().metaData().filteringAliases(shard.index(), request.indices()); @@ -170,7 +156,7 @@ public class TransportValidateQueryAction extends TransportBroadcastOperationAct } @Override - protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) throws ElasticsearchException { + protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) { IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex()); IndexQueryParserService queryParserService = indexService.queryParserService(); IndexShard indexShard = indexService.shardSafe(request.shardId().id()); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/TransportDeleteWarmerAction.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/TransportDeleteWarmerAction.java index 86229582037..17a30e50ce1 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/TransportDeleteWarmerAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/delete/TransportDeleteWarmerAction.java @@ -52,7 +52,7 @@ public class TransportDeleteWarmerAction extends TransportMasterNodeOperationAct @Inject public TransportDeleteWarmerAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, DeleteWarmerAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, DeleteWarmerAction.NAME, transportService, clusterService, threadPool, actionFilters, DeleteWarmerRequest.class); } @Override @@ -61,11 +61,6 @@ public class TransportDeleteWarmerAction extends TransportMasterNodeOperationAct return ThreadPool.Names.SAME; } - @Override - protected DeleteWarmerRequest newRequest() { - return new DeleteWarmerRequest(); - } - @Override protected DeleteWarmerResponse newResponse() { return new DeleteWarmerResponse(); @@ -73,11 +68,11 @@ public class TransportDeleteWarmerAction extends TransportMasterNodeOperationAct @Override protected ClusterBlockException checkBlock(DeleteWarmerRequest request, ClusterState state) { - return 
state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices())); + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices())); } @Override - protected void masterOperation(final DeleteWarmerRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final DeleteWarmerRequest request, final ClusterState state, final ActionListener listener) { final String[] concreteIndices = clusterService.state().metaData().concreteIndices(request.indicesOptions(), request.indices()); clusterService.submitStateUpdateTask("delete_warmer [" + Arrays.toString(request.names()) + "]", new AckedClusterStateUpdateTask(request, listener) { diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java index 929cf05a7a6..b9fe92ea438 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/get/TransportGetWarmersAction.java @@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.indices.warmer.get; import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.info.TransportClusterInfoAction; import org.elasticsearch.cluster.ClusterService; @@ -45,7 +44,7 @@ public class TransportGetWarmersAction extends TransportClusterInfoAction listener) throws ElasticsearchException { + protected void doMasterOperation(final GetWarmersRequest request, String[] concreteIndices, final ClusterState state, final ActionListener listener) { ImmutableOpenMap> result = state.metaData().findWarmers( concreteIndices, request.types(), request.warmers() ); diff --git a/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java b/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java index 5ccd1f0653e..e92eb3195d7 100644 --- a/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java +++ b/src/main/java/org/elasticsearch/action/admin/indices/warmer/put/TransportPutWarmerAction.java @@ -58,7 +58,7 @@ public class TransportPutWarmerAction extends TransportMasterNodeOperationAction @Inject public TransportPutWarmerAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, TransportSearchAction searchAction, ActionFilters actionFilters) { - super(settings, PutWarmerAction.NAME, transportService, clusterService, threadPool, actionFilters); + super(settings, PutWarmerAction.NAME, transportService, clusterService, threadPool, actionFilters, PutWarmerRequest.class); this.searchAction = searchAction; } @@ -67,11 +67,6 @@ public class TransportPutWarmerAction extends TransportMasterNodeOperationAction return ThreadPool.Names.SAME; } - @Override - protected PutWarmerRequest newRequest() { - return new PutWarmerRequest(); - } - @Override protected PutWarmerResponse newResponse() { return new 
PutWarmerResponse(); @@ -80,11 +75,17 @@ public class TransportPutWarmerAction extends TransportMasterNodeOperationAction @Override protected ClusterBlockException checkBlock(PutWarmerRequest request, ClusterState state) { String[] concreteIndices = clusterService.state().metaData().concreteIndices(request.searchRequest().indicesOptions(), request.searchRequest().indices()); - return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA, concreteIndices); + ClusterBlockException status = state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices); + if (status != null) { + return status; + } + // PutWarmer executes a SearchQuery before adding the new warmer to the cluster state, + // so we need to check the same block as TransportSearchTypeAction here + return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices); } @Override - protected void masterOperation(final PutWarmerRequest request, final ClusterState state, final ActionListener listener) throws ElasticsearchException { + protected void masterOperation(final PutWarmerRequest request, final ClusterState state, final ActionListener listener) { // first execute the search request, see that its ok... SearchRequest searchRequest = new SearchRequest(request.searchRequest(), request); searchAction.execute(searchRequest, new ActionListener() { diff --git a/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index 3ffa1ddcb62..c32f02f0022 100644 --- a/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -51,7 +51,7 @@ public class BulkItemResponse implements Streamable { this.index = index; this.type = type; this.id = id; - this.message = ExceptionsHelper.detailedMessage(t); + this.message = t.toString(); this.status = ExceptionsHelper.status(t); } diff --git a/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 9c6d1ac8842..be26f318625 100644 --- a/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.bulk; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.delete.DeleteRequest; @@ -271,7 +270,7 @@ public class BulkProcessor implements Closeable { protected void ensureOpen() { if (closed) { - throw new ElasticsearchIllegalStateException("bulk process already closed"); + throw new IllegalStateException("bulk process already closed"); } } diff --git a/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index b6665160121..617c3fc32bd 100644 --- a/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.bulk; import com.google.common.collect.Lists; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.*; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; @@ -84,7 +83,7 @@ public class BulkRequest extends ActionRequest implements Composite } else if (request instanceof UpdateRequest) 
{ add((UpdateRequest) request, payload); } else { - throw new ElasticsearchIllegalArgumentException("No support for request [" + request + "]"); + throw new IllegalArgumentException("No support for request [" + request + "]"); } return this; } @@ -294,7 +293,7 @@ public class BulkRequest extends ActionRequest implements Composite } else if (token.isValue()) { if ("_index".equals(currentFieldName)) { if (!allowExplicitIndex) { - throw new ElasticsearchIllegalArgumentException("explicit index in bulk is not allowed"); + throw new IllegalArgumentException("explicit index in bulk is not allowed"); } index = parser.text(); } else if ("_type".equals(currentFieldName)) { diff --git a/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java b/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java index 4cdc4887060..66d3965b4f5 100644 --- a/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java +++ b/src/main/java/org/elasticsearch/action/bulk/TransportBulkAction.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.bulk; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import com.google.common.collect.Sets; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; @@ -68,19 +67,15 @@ import java.util.concurrent.atomic.AtomicInteger; public class TransportBulkAction extends HandledTransportAction { private final AutoCreateIndex autoCreateIndex; - private final boolean allowIdGeneration; - private final ClusterService clusterService; - private final TransportShardBulkAction shardBulkAction; - private final TransportCreateIndexAction createIndexAction; @Inject public TransportBulkAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService, TransportShardBulkAction shardBulkAction, TransportCreateIndexAction createIndexAction, ActionFilters actionFilters) { - super(settings, BulkAction.NAME, threadPool, transportService, actionFilters); + super(settings, BulkAction.NAME, threadPool, transportService, actionFilters, BulkRequest.class); this.clusterService = clusterService; this.shardBulkAction = shardBulkAction; this.createIndexAction = createIndexAction; @@ -89,33 +84,39 @@ public class TransportBulkAction extends HandledTransportAction listener) { final long startTime = System.currentTimeMillis(); final AtomicArray responses = new AtomicArray<>(bulkRequest.requests.size()); if (autoCreateIndex.needToCheck()) { - final Set indices = Sets.newHashSet(); + // Keep track of all unique indices and all unique types per index for the create index requests: + final Map> indicesAndTypes = new HashMap<>(); for (ActionRequest request : bulkRequest.requests) { if (request instanceof DocumentRequest) { DocumentRequest req = (DocumentRequest) request; - if (!indices.contains(req.index())) { - indices.add(req.index()); + Set types = indicesAndTypes.get(req.index()); + if (types == null) { + indicesAndTypes.put(req.index(), types = new HashSet<>()); } + types.add(req.type()); } else { throw new ElasticsearchException("Parsed unknown request in bulk actions: " + request.getClass().getSimpleName()); } } - final AtomicInteger counter = new AtomicInteger(indices.size()); + final AtomicInteger counter = new AtomicInteger(indicesAndTypes.size()); ClusterState state = clusterService.state(); - for (final String index : indices) { + for (Map.Entry> entry : indicesAndTypes.entrySet()) { + final String index = 
entry.getKey(); if (autoCreateIndex.shouldAutoCreate(index, state)) { - createIndexAction.execute(new CreateIndexRequest(bulkRequest).index(index).cause("auto(bulk api)").masterNodeTimeout(bulkRequest.timeout()), new ActionListener() { + CreateIndexRequest createIndexRequest = new CreateIndexRequest(bulkRequest); + createIndexRequest.index(index); + for (String type : entry.getValue()) { + createIndexRequest.mapping(type); + } + createIndexRequest.cause("auto(bulk api)"); + createIndexRequest.masterNodeTimeout(bulkRequest.timeout()); + createIndexAction.execute(createIndexRequest, new ActionListener() { @Override public void onResponse(CreateIndexResponse result) { if (counter.decrementAndGet() == 0) { diff --git a/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 245d7d16033..59c2614af1b 100644 --- a/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.bulk; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionWriteResponse; @@ -52,9 +51,11 @@ import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.DocumentAlreadyExistsException; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.river.RiverIndexName; @@ -82,17 +83,13 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation public TransportShardBulkAction(Settings settings, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, MappingUpdatedAction mappingUpdatedAction, UpdateHelper updateHelper, ActionFilters actionFilters) { - super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters); + super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, + BulkShardRequest.class, BulkShardRequest.class, ThreadPool.Names.BULK); this.mappingUpdatedAction = mappingUpdatedAction; this.updateHelper = updateHelper; this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true); } - @Override - protected String executor() { - return ThreadPool.Names.BULK; - } - @Override protected boolean checkWriteConsistency() { return true; @@ -102,17 +99,6 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation protected TransportRequestOptions transportOptions() { return BulkAction.INSTANCE.transportOptions(settings); } - - @Override - protected BulkShardRequest newRequestInstance() { - return new BulkShardRequest(); - } - - @Override - protected BulkShardRequest newReplicaRequestInstance() { - return newRequestInstance(); - } - @Override protected BulkShardResponse 
newResponseInstance() { return new BulkShardResponse(); @@ -156,7 +142,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation } throw (ElasticsearchException) e; } - if (e instanceof ElasticsearchException && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) { + if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) { logger.trace("{} failed to execute bulk item (index) {}", e, shardRequest.shardId, indexRequest); } else { logger.debug("{} failed to execute bulk item (index) {}", e, shardRequest.shardId, indexRequest); @@ -189,7 +175,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation } throw (ElasticsearchException) e; } - if (e instanceof ElasticsearchException && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) { + if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) { logger.trace("{} failed to execute bulk item (delete) {}", e, shardRequest.shardId, deleteRequest); } else { logger.debug("{} failed to execute bulk item (delete) {}", e, shardRequest.shardId, deleteRequest); @@ -278,7 +264,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation case UPSERT: case INDEX: IndexRequest indexRequest = updateResult.request(); - if (t instanceof ElasticsearchException && ((ElasticsearchException) t).status() == RestStatus.CONFLICT) { + if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) { logger.trace("{} failed to execute bulk item (index) {}", t, shardRequest.shardId, indexRequest); } else { logger.debug("{} failed to execute bulk item (index) {}", t, shardRequest.shardId, indexRequest); @@ -288,7 +274,7 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation break; case DELETE: DeleteRequest deleteRequest = updateResult.request(); - if (t instanceof ElasticsearchException && ((ElasticsearchException) t).status() == RestStatus.CONFLICT) { + if (ExceptionsHelper.status(t) == RestStatus.CONFLICT) { logger.trace("{} failed to execute bulk item (delete) {}", t, shardRequest.shardId, deleteRequest); } else { logger.debug("{} failed to execute bulk item (delete) {}", t, shardRequest.shardId, deleteRequest); @@ -304,6 +290,8 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation } } + } else { + throw new IllegalStateException("Unexpected index operation: " + item.request()); } assert item.getPrimaryResponse() != null; @@ -352,23 +340,6 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation } - private void applyMappingUpdate(IndexService indexService, String type, Mapping update) throws Throwable { - // HACK: Rivers seem to have something specific that triggers potential - // deadlocks when doing concurrent indexing. 
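The Map<String, Set<String>> bookkeeping added to TransportBulkAction above is the substantive change in that hunk: instead of only collecting which indices need auto-creation, the bulk action now records every type seen per index so each CreateIndexRequest can carry all required mappings up front. A standalone sketch of the grouping, where DocRef is a hypothetical stand-in for DocumentRequest:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    final class BulkIndexGrouping {
        static final class DocRef {
            final String index;
            final String type;
            DocRef(String index, String type) { this.index = index; this.type = type; }
        }

        static Map<String, Set<String>> groupTypesByIndex(List<DocRef> requests) {
            Map<String, Set<String>> indicesAndTypes = new HashMap<String, Set<String>>();
            for (DocRef req : requests) {
                Set<String> types = indicesAndTypes.get(req.index);
                if (types == null) {
                    // first time we see this index: start its type set
                    indicesAndTypes.put(req.index, types = new HashSet<String>());
                }
                types.add(req.type);
            }
            return indicesAndTypes;
        }
    }

Registering the types at creation time presumably saves a round of dynamic mapping updates once the bulk items start hitting the freshly created index.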
So for now they keep the - // old behaviour of updating mappings locally first and then - // asynchronously notifying the master - // this can go away when rivers are removed - final String indexName = indexService.index().name(); - final String indexUUID = indexService.indexUUID(); - if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { - indexService.mapperService().merge(type, new CompressedString(update.toBytes()), true); - mappingUpdatedAction.updateMappingOnMaster(indexName, indexUUID, type, update, null); - } else { - mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, indexUUID, type, update); - indexService.mapperService().merge(type, new CompressedString(update.toBytes()), true); - } - } - private WriteResult shardIndexOperation(BulkShardRequest request, IndexRequest indexRequest, ClusterState clusterState, IndexShard indexShard, IndexService indexService, boolean processed) throws Throwable { @@ -387,38 +358,46 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, indexRequest.source()).type(indexRequest.type()).id(indexRequest.id()) .routing(indexRequest.routing()).parent(indexRequest.parent()).timestamp(indexRequest.timestamp()).ttl(indexRequest.ttl()); - long version; - boolean created; - Engine.IndexingOperation op; + final Engine.IndexingOperation operation; if (indexRequest.opType() == IndexRequest.OpType.INDEX) { - Engine.Index index = indexShard.prepareIndex(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates() || indexRequest.canHaveDuplicates()); - if (index.parsedDoc().dynamicMappingsUpdate() != null) { - applyMappingUpdate(indexService, indexRequest.type(), index.parsedDoc().dynamicMappingsUpdate()); - } - indexShard.index(index); - version = index.version(); - op = index; - created = index.created(); + operation = indexShard.prepareIndex(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates() || indexRequest.canHaveDuplicates()); } else { - Engine.Create create = indexShard.prepareCreate(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.PRIMARY, + assert indexRequest.opType() == IndexRequest.OpType.CREATE : indexRequest.opType(); + operation = indexShard.prepareCreate(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates() || indexRequest.canHaveDuplicates(), indexRequest.autoGeneratedId()); - if (create.parsedDoc().dynamicMappingsUpdate() != null) { - applyMappingUpdate(indexService, indexRequest.type(), create.parsedDoc().dynamicMappingsUpdate()); - } - indexShard.create(create); - version = create.version(); - op = create; - created = true; } + Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); + final boolean created; + if (update != null) { + final String indexName = indexService.index().name(); + if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { + // With rivers, we have a chicken and egg problem if indexing + // the _meta document triggers a mapping update. 
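The comment continues below; the control flow it justifies, together with the replica-side counterpart further down in this file, can be sketched as follows. On the primary (river index aside), the dynamic mapping update is sent to the master synchronously before the operation executes, so the mapping gets validated first; on a replica, mappings only ever arrive via cluster state, so an unapplied update means "retry later", not "apply it here". All types are hypothetical stand-ins, not the real Engine or MappingUpdatedAction APIs:

    final class DynamicMappingFlowSketch {
        interface Op {
            Object dynamicMappingsUpdate(); // null when parsing needed no new mappings
            boolean execute();              // returns whether a document was created
        }
        interface MasterClient {
            void updateMappingOnMasterSynchronously(Object update); // throws on rejection
        }
        static class RetryOnReplicaException extends RuntimeException {
            RetryOnReplicaException(String msg) { super(msg); }
        }

        // primary: validate/publish the mapping before touching the shard
        static boolean indexOnPrimary(Op operation, MasterClient master) {
            Object update = operation.dynamicMappingsUpdate();
            if (update != null) {
                master.updateMappingOnMasterSynchronously(update);
            }
            return operation.execute();
        }

        // replica: a locally unknown mapping means the cluster state with the
        // primary's update has not arrived yet, so back off and re-execute later
        static void indexOnReplica(Op operation) {
            if (operation.dynamicMappingsUpdate() != null) {
                throw new RetryOnReplicaException("Mappings are not available on the replica yet");
            }
            operation.execute();
        }
    }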
Because we would + // like to validate the mapping update first, but on the other + // hand putting the mapping would start the river, which expects + // to find a _meta document + // So we have no choice but to index first and send mappings afterwards + MapperService mapperService = indexService.mapperService(); + mapperService.merge(indexRequest.type(), new CompressedString(update.toBytes()), true); + created = operation.execute(indexShard); + mappingUpdatedAction.updateMappingOnMasterAsynchronously(indexName, indexRequest.type(), update); + } else { + mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, indexRequest.type(), update); + created = operation.execute(indexShard); + } + } else { + created = operation.execute(indexShard); + } + // update the version on request so it will happen on the replicas + final long version = operation.version(); indexRequest.versionType(indexRequest.versionType().versionTypeForReplicationAndRecovery()); indexRequest.version(version); assert indexRequest.versionType().validateVersionForWrites(indexRequest.version()); - IndexResponse indexResponse = new IndexResponse(request.index(), indexRequest.type(), indexRequest.id(), version, created); - return new WriteResult(indexResponse, op); + return new WriteResult(indexResponse, operation); } private WriteResult shardDeleteOperation(BulkShardRequest request, DeleteRequest deleteRequest, IndexShard indexShard) { @@ -522,15 +501,15 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation indexShard.indexingService().noopUpdate(updateRequest.type()); return new UpdateResult(translate, updateResponse); default: - throw new ElasticsearchIllegalStateException("Illegal update operation " + translate.operation()); + throw new IllegalStateException("Illegal update operation " + translate.operation()); } } @Override - protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) { - IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id()); - final BulkShardRequest request = shardRequest.request; + protected void shardOperationOnReplica(ShardId shardId, BulkShardRequest request) { + IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + IndexShard indexShard = indexService.shardSafe(shardId.id()); for (int i = 0; i < request.items().length; i++) { BulkItemRequest item = request.items()[i]; if (item == null || item.isIgnoreOnReplica()) { @@ -542,15 +521,20 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, indexRequest.source()).type(indexRequest.type()).id(indexRequest.id()) .routing(indexRequest.routing()).parent(indexRequest.parent()).timestamp(indexRequest.timestamp()).ttl(indexRequest.ttl()); + final Engine.IndexingOperation operation; if (indexRequest.opType() == IndexRequest.OpType.INDEX) { - Engine.Index index = indexShard.prepareIndex(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates() || indexRequest.canHaveDuplicates()); - indexShard.index(index); + operation = indexShard.prepareIndex(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates() || indexRequest.canHaveDuplicates()); } else { - Engine.Create create = indexShard.prepareCreate(sourceToParse, + assert indexRequest.opType() == 
IndexRequest.OpType.CREATE : indexRequest.opType(); + operation = indexShard.prepareCreate(sourceToParse, indexRequest.version(), indexRequest.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates() || indexRequest.canHaveDuplicates(), indexRequest.autoGeneratedId()); - indexShard.create(create); } + Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); + } + operation.execute(indexShard); } catch (Throwable e) { // if its not an ignore replica failure, we need to make sure to bubble up the failure // so we will fail the shard @@ -570,6 +554,8 @@ public class TransportShardBulkAction extends TransportShardReplicationOperation throw e; } } + } else { + throw new IllegalStateException("Unexpected index operation: " + item.request()); } } diff --git a/src/main/java/org/elasticsearch/action/count/CountRequest.java b/src/main/java/org/elasticsearch/action/count/CountRequest.java index 7233d5ae7fc..a37ba887fd5 100644 --- a/src/main/java/org/elasticsearch/action/count/CountRequest.java +++ b/src/main/java/org/elasticsearch/action/count/CountRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.count; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.QuerySourceBuilder; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest; @@ -217,7 +216,7 @@ public class CountRequest extends BroadcastOperationRequest { */ public CountRequest terminateAfter(int terminateAfterCount) { if (terminateAfterCount <= 0) { - throw new ElasticsearchIllegalArgumentException("terminateAfter must be > 0"); + throw new IllegalArgumentException("terminateAfter must be > 0"); } this.terminateAfter = terminateAfterCount; return this; diff --git a/src/main/java/org/elasticsearch/action/count/CountRequestBuilder.java b/src/main/java/org/elasticsearch/action/count/CountRequestBuilder.java index 6d1566797c1..b7b6377e619 100644 --- a/src/main/java/org/elasticsearch/action/count/CountRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/count/CountRequestBuilder.java @@ -19,12 +19,14 @@ package org.elasticsearch.action.count; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.QuerySourceBuilder; import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.query.QueryBuilder; /** @@ -144,4 +146,19 @@ public class CountRequestBuilder extends BroadcastOperationRequestBuilder { private final IndicesService indicesService; - private final ScriptService scriptService; - private final PageCacheRecycler pageCacheRecycler; - private final BigArrays bigArrays; @Inject public TransportCountAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ActionFilters actionFilters) { - super(settings, CountAction.NAME, threadPool, clusterService, 
transportService, actionFilters); + super(settings, CountAction.NAME, threadPool, clusterService, transportService, actionFilters, + CountRequest.class, ShardCountRequest.class, ThreadPool.Names.SEARCH); this.indicesService = indicesService; this.scriptService = scriptService; this.pageCacheRecycler = pageCacheRecycler; @@ -89,21 +87,6 @@ public class TransportCountAction extends TransportBroadcastOperationAction { private final AutoCreateIndex autoCreateIndex; - private final TransportCreateIndexAction createIndexAction; @Inject public TransportDeleteAction(Settings settings, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, TransportCreateIndexAction createIndexAction, ActionFilters actionFilters) { - super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters); + super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, + DeleteRequest.class, DeleteRequest.class, ThreadPool.Names.INDEX); this.createIndexAction = createIndexAction; this.autoCreateIndex = new AutoCreateIndex(settings); } - @Override - protected String executor() { - return ThreadPool.Names.INDEX; - } - @Override protected void doExecute(final DeleteRequest request, final ActionListener listener) { if (autoCreateIndex.shouldAutoCreate(request.index(), clusterService.state())) { @@ -107,7 +102,7 @@ public class TransportDeleteAction extends TransportShardReplicationOperationAct if (request.request().routing() == null) { if (request.request().versionType() != VersionType.INTERNAL) { // TODO: implement this feature - throw new ElasticsearchIllegalArgumentException("routing value is required for deleting documents of type [" + request.request().type() + throw new IllegalArgumentException("routing value is required for deleting documents of type [" + request.request().type() + "] while using version_type [" + request.request().versionType() + "]"); } throw new RoutingMissingException(request.concreteIndex(), request.request().type(), request.request().id()); @@ -125,16 +120,6 @@ public class TransportDeleteAction extends TransportShardReplicationOperationAct return true; } - @Override - protected DeleteRequest newRequestInstance() { - return new DeleteRequest(); - } - - @Override - protected DeleteRequest newReplicaRequestInstance() { - return newRequestInstance(); - } - @Override protected DeleteResponse newResponseInstance() { return new DeleteResponse(); @@ -165,9 +150,8 @@ public class TransportDeleteAction extends TransportShardReplicationOperationAct } @Override - protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) { - DeleteRequest request = shardRequest.request; - IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id()); + protected void shardOperationOnReplica(ShardId shardId, DeleteRequest request) { + IndexShard indexShard = indicesService.indexServiceSafe(shardId.getIndex()).shardSafe(shardId.id()); Engine.Delete delete = indexShard.prepareDelete(request.type(), request.id(), request.version(), request.versionType(), Engine.Operation.Origin.REPLICA); indexShard.delete(delete); diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java b/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java deleted file mode 100644 index 
7386c9e9449..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequest.java +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.deletebyquery; - -import com.google.common.base.Charsets; -import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.support.QuerySourceBuilder; -import org.elasticsearch.action.support.replication.IndicesReplicationOperationRequest; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; - -import java.io.IOException; -import java.util.Arrays; -import java.util.Map; - -import static org.elasticsearch.action.ValidateActions.addValidationError; - -/** - * A request to delete all documents that matching a specific query. Best created with - * {@link org.elasticsearch.client.Requests#deleteByQueryRequest(String...)}. - *
<p/> - * <p/>
The request requires the source to be set either using {@link #source(QuerySourceBuilder)}, - * or {@link #source(byte[])}. - * - * @see DeleteByQueryResponse - * @see org.elasticsearch.client.Requests#deleteByQueryRequest(String...) - * @see org.elasticsearch.client.Client#deleteByQuery(DeleteByQueryRequest) - */ -public class DeleteByQueryRequest extends IndicesReplicationOperationRequest { - - private BytesReference source; - - private String[] types = Strings.EMPTY_ARRAY; - @Nullable - private String routing; - - /** - * Constructs a new delete by query request to run against the provided indices. No indices means - * it will run against all indices. - */ - public DeleteByQueryRequest(String... indices) { - this.indices = indices; - } - - public DeleteByQueryRequest() { - } - - /** - * Copy constructor that creates a new delete by query request that is a copy of the one provided as an argument. - * The new request will inherit though headers and context from the original request that caused it. - */ - public DeleteByQueryRequest(ActionRequest originalRequest) { - super(originalRequest); - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = super.validate(); - if (source == null) { - validationException = addValidationError("source is missing", validationException); - } - return validationException; - } - - /** - * The source to execute. - */ - public BytesReference source() { - return source; - } - - /** - * The source to execute. - */ - public DeleteByQueryRequest source(QuerySourceBuilder sourceBuilder) { - this.source = sourceBuilder.buildAsBytes(Requests.CONTENT_TYPE); - return this; - } - - /** - * The source to execute. It is preferable to use either {@link #source(byte[])} - * or {@link #source(QuerySourceBuilder)}. - */ - public DeleteByQueryRequest source(String query) { - this.source = new BytesArray(query.getBytes(Charsets.UTF_8)); - return this; - } - - /** - * The source to execute in the form of a map. - */ - @SuppressWarnings("unchecked") - public DeleteByQueryRequest source(Map source) { - try { - XContentBuilder builder = XContentFactory.contentBuilder(Requests.CONTENT_TYPE); - builder.map(source); - return source(builder); - } catch (IOException e) { - throw new ElasticsearchGenerationException("Failed to generate [" + source + "]", e); - } - } - - public DeleteByQueryRequest source(XContentBuilder builder) { - this.source = builder.bytes(); - return this; - } - - /** - * The source to execute. - */ - public DeleteByQueryRequest source(byte[] source) { - return source(source, 0, source.length); - } - - /** - * The source to execute. - */ - public DeleteByQueryRequest source(byte[] source, int offset, int length) { - return source(new BytesArray(source, offset, length)); - } - - public DeleteByQueryRequest source(BytesReference source) { - this.source = source; - return this; - } - - /** - * The types of documents the query will run against. Defaults to all types. - */ - public String[] types() { - return this.types; - } - - /** - * A comma separated list of routing values to control the shards the search will be executed on. - */ - public String routing() { - return this.routing; - } - - /** - * A comma separated list of routing values to control the shards the search will be executed on. - */ - public DeleteByQueryRequest routing(String routing) { - this.routing = routing; - return this; - } - - /** - * The routing values to control the shards that the search will be executed on. 
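The validate() override above uses the codebase's accumulating-validation idiom: each check threads a possibly-null exception through addValidationError, and the caller fails the request only if anything accumulated. A self-contained sketch of the idiom (simplified; the real ActionRequestValidationException carries more than a string list):

    import java.util.ArrayList;
    import java.util.List;

    final class ValidationSketch {
        static final class ValidationException extends RuntimeException {
            final List<String> errors = new ArrayList<String>();
        }

        static ValidationException addValidationError(String error, ValidationException e) {
            if (e == null) {
                e = new ValidationException(); // created lazily on the first failure
            }
            e.errors.add(error);
            return e; // callers must keep threading the returned instance
        }

        static ValidationException validate(Object source) {
            ValidationException validationException = null; // null means valid
            if (source == null) {
                validationException = addValidationError("source is missing", validationException);
            }
            return validationException;
        }
    }

The "must keep threading the returned instance" comment is the sharp edge here: note that the deleted ShardDeleteByQueryRequest.validate() further down discards the return value of addValidationError, so its "source is missing" error was silently lost whenever the base validation had found nothing.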
- */ - public DeleteByQueryRequest routing(String... routings) { - this.routing = Strings.arrayToCommaDelimitedString(routings); - return this; - } - - /** - * The types of documents the query will run against. Defaults to all types. - */ - public DeleteByQueryRequest types(String... types) { - this.types = types; - return this; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - source = in.readBytesReference(); - routing = in.readOptionalString(); - types = in.readStringArray(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBytesReference(source); - out.writeOptionalString(routing); - out.writeStringArray(types); - } - - @Override - public String toString() { - String sSource = "_na_"; - try { - sSource = XContentHelper.convertToJson(source, false); - } catch (Exception e) { - // ignore - } - return "[" + Arrays.toString(indices) + "][" + Arrays.toString(types) + "], source[" + sSource + "]"; - } -} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequestBuilder.java b/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequestBuilder.java deleted file mode 100644 index ea29d3df92a..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryRequestBuilder.java +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.deletebyquery; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.WriteConsistencyLevel; -import org.elasticsearch.action.support.QuerySourceBuilder; -import org.elasticsearch.action.support.replication.IndicesReplicationOperationRequestBuilder; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.index.query.QueryBuilder; - -import java.util.Map; - -/** - * - */ -public class DeleteByQueryRequestBuilder extends IndicesReplicationOperationRequestBuilder { - - private QuerySourceBuilder sourceBuilder; - - public DeleteByQueryRequestBuilder(Client client) { - super(client, new DeleteByQueryRequest()); - } - - /** - * The types of documents the query will run against. Defaults to all types. - */ - public DeleteByQueryRequestBuilder setTypes(String... types) { - request.types(types); - return this; - } - - /** - * A comma separated list of routing values to control the shards the action will be executed on. - */ - public DeleteByQueryRequestBuilder setRouting(String routing) { - request.routing(routing); - return this; - } - - /** - * The routing values to control the shards that the action will be executed on. 
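The builder whose deletion starts here keeps its QuerySourceBuilder lazy: setQuery() materializes it on first use, and doExecute() further below copies it into the request only if it was ever created, letting the raw setSource(...) variants and setQuery(...) coexist until execution time (the builder wins if both were used). A sketch of that pattern with hypothetical stand-in types:

    final class LazySourceBuilderSketch {
        static final class Query {
            final String dsl;
            Query(String dsl) { this.dsl = dsl; }
        }

        static final class SourceBuilder {
            private Query query;
            SourceBuilder setQuery(Query q) { this.query = q; return this; }
            String buildAsBytes() { return "{\"query\":" + query.dsl + "}"; }
        }

        private SourceBuilder sourceBuilder; // created on first use only
        private String source;               // set directly by setSource(...)

        LazySourceBuilderSketch setQuery(Query q) {
            sourceBuilder().setQuery(q);
            return this;
        }

        LazySourceBuilderSketch setSource(String source) {
            this.source = source;
            return this;
        }

        String execute() {
            if (sourceBuilder != null) {
                source = sourceBuilder.buildAsBytes(); // mirrors doExecute() in the diff
            }
            return source;
        }

        private SourceBuilder sourceBuilder() {
            if (sourceBuilder == null) {
                sourceBuilder = new SourceBuilder();
            }
            return sourceBuilder;
        }
    }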
- */ - public DeleteByQueryRequestBuilder setRouting(String... routing) { - request.routing(routing); - return this; - } - - - /** - * The query to delete documents for. - * - * @see org.elasticsearch.index.query.QueryBuilders - */ - public DeleteByQueryRequestBuilder setQuery(QueryBuilder queryBuilder) { - sourceBuilder().setQuery(queryBuilder); - return this; - } - - /** - * The source to execute. It is preferable to use either {@link #setSource(byte[])} - * or {@link #setQuery(QueryBuilder)}. - */ - public DeleteByQueryRequestBuilder setSource(String source) { - request().source(source); - return this; - } - - /** - * The source to execute in the form of a map. - */ - public DeleteByQueryRequestBuilder setSource(Map source) { - request().source(source); - return this; - } - - /** - * The source to execute in the form of a builder. - */ - public DeleteByQueryRequestBuilder setSource(XContentBuilder builder) { - request().source(builder); - return this; - } - - /** - * The source to execute. - */ - public DeleteByQueryRequestBuilder setSource(byte[] source) { - request().source(source); - return this; - } - - /** - * The source to execute. - */ - public DeleteByQueryRequestBuilder setSource(BytesReference source) { - request().source(source); - return this; - } - - /** - * The source to execute. - */ - public DeleteByQueryRequestBuilder setSource(byte[] source, int offset, int length) { - request().source(source, offset, length); - return this; - } - - @Override - public DeleteByQueryRequestBuilder setConsistencyLevel(WriteConsistencyLevel consistencyLevel) { - request.consistencyLevel(consistencyLevel); - return this; - } - - @Override - protected void doExecute(ActionListener listener) { - if (sourceBuilder != null) { - request.source(sourceBuilder); - } - - client.deleteByQuery(request, listener); - } - - private QuerySourceBuilder sourceBuilder() { - if (sourceBuilder == null) { - sourceBuilder = new QuerySourceBuilder(); - } - return sourceBuilder; - } -} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryResponse.java b/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryResponse.java deleted file mode 100644 index b65744f40a2..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryResponse.java +++ /dev/null @@ -1,93 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.deletebyquery; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.rest.RestStatus; - -import java.io.IOException; -import java.util.Iterator; -import java.util.Map; - -import static com.google.common.collect.Maps.newHashMap; - -/** - * The response of delete by query action. Holds the {@link IndexDeleteByQueryResponse}s from all the - * different indices. - */ -public class DeleteByQueryResponse extends ActionResponse implements Iterable { - - private Map indices = newHashMap(); - - DeleteByQueryResponse() { - - } - - @Override - public Iterator iterator() { - return indices.values().iterator(); - } - - /** - * The responses from all the different indices. - */ - public Map getIndices() { - return indices; - } - - /** - * The response of a specific index. - */ - public IndexDeleteByQueryResponse getIndex(String index) { - return indices.get(index); - } - - public RestStatus status() { - RestStatus status = RestStatus.OK; - for (IndexDeleteByQueryResponse indexResponse : indices.values()) { - if (indexResponse.getShardInfo().status().getStatus() > status.getStatus()) { - status = indexResponse.getShardInfo().status(); - } - } - return status; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - IndexDeleteByQueryResponse response = new IndexDeleteByQueryResponse(); - response.readFrom(in); - indices.put(response.getIndex(), response); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeVInt(indices.size()); - for (IndexDeleteByQueryResponse indexResponse : indices.values()) { - indexResponse.writeTo(out); - } - } -} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryRequest.java b/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryRequest.java deleted file mode 100644 index 94c58c8b087..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryRequest.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.deletebyquery; - -import org.elasticsearch.action.support.replication.IndexReplicationOperationRequest; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.bytes.BytesReference; - -import java.util.Set; - -/** - * Delete by query request to execute on a specific index. 
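The status() method of the DeleteByQueryResponse deleted above reduces the per-index shard statuses to a single HTTP status by keeping the numerically highest code, so any failing index escalates the whole response. The same reduction, boiled down to plain ints:

    final class StatusReduction {
        static int overallStatus(int[] perIndexStatuses) {
            int status = 200; // OK unless some index reports something worse
            for (int s : perIndexStatuses) {
                if (s > status) {
                    status = s;
                }
            }
            return status;
        }
    }

With real RestStatus codes the effect is the same: 200 survives only when every index answered OK.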
- */ -class IndexDeleteByQueryRequest extends IndexReplicationOperationRequest { - - private final BytesReference source; - private final String[] types; - @Nullable - private final Set routing; - @Nullable - private final String[] filteringAliases; - private final long nowInMillis; - - IndexDeleteByQueryRequest(DeleteByQueryRequest request, String index, @Nullable Set routing, @Nullable String[] filteringAliases, - long nowInMillis) { - super(index, request.timeout(), request.consistencyLevel(), request.indices(), request.indicesOptions(), request); - this.source = request.source(); - this.types = request.types(); - this.routing = routing; - this.filteringAliases = filteringAliases; - this.nowInMillis = nowInMillis; - } - - BytesReference source() { - return source; - } - - Set routing() { - return this.routing; - } - - String[] types() { - return this.types; - } - - String[] filteringAliases() { - return filteringAliases; - } - - long nowInMillis() { - return nowInMillis; - } -} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryRequest.java b/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryRequest.java deleted file mode 100644 index c16bf57e4b0..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryRequest.java +++ /dev/null @@ -1,177 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.deletebyquery; - -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.OriginalIndices; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.XContentHelper; - -import java.io.IOException; -import java.util.Arrays; -import java.util.HashSet; -import java.util.Set; - -import static org.elasticsearch.action.ValidateActions.addValidationError; - -/** - * Delete by query request to execute on a specific shard. 
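The writeTo()/readFrom() pair of this class, just below, shows the codebase's wire convention for optional collections: a length prefix of 0 stands for absent, which also means an absent and an empty routing set serialize identically. A self-contained sketch of the convention over plain DataOutput/DataInput rather than the real StreamOutput/StreamInput and their vInt encoding:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    final class OptionalArrayWire {
        static void writeOptionalStrings(DataOutput out, String[] values) throws IOException {
            if (values == null) {
                out.writeInt(0); // absent is encoded exactly like empty
                return;
            }
            out.writeInt(values.length);
            for (String v : values) {
                out.writeUTF(v);
            }
        }

        static String[] readOptionalStrings(DataInput in) throws IOException {
            int size = in.readInt();
            if (size == 0) {
                return null; // the null/empty distinction is not round-tripped
            }
            String[] values = new String[size];
            for (int i = 0; i < size; i++) {
                values[i] = in.readUTF();
            }
            return values;
        }
    }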
- */ -public class ShardDeleteByQueryRequest extends ShardReplicationOperationRequest { - - private int shardId; - private BytesReference source; - private String[] types = Strings.EMPTY_ARRAY; - @Nullable - private Set routing; - @Nullable - private String[] filteringAliases; - private long nowInMillis; - - private OriginalIndices originalIndices; - - ShardDeleteByQueryRequest(IndexDeleteByQueryRequest request, int shardId) { - super(request); - this.index = request.index(); - this.source = request.source(); - this.types = request.types(); - this.shardId = shardId; - consistencyLevel(request.consistencyLevel()); - timeout = request.timeout(); - this.routing = request.routing(); - filteringAliases = request.filteringAliases(); - nowInMillis = request.nowInMillis(); - this.originalIndices = new OriginalIndices(request); - } - - ShardDeleteByQueryRequest() { - } - - @Override - public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = super.validate(); - if (source == null) { - addValidationError("source is missing", validationException); - } - return validationException; - } - - public int shardId() { - return this.shardId; - } - - BytesReference source() { - return source; - } - - public String[] types() { - return this.types; - } - - public Set routing() { - return this.routing; - } - - public String[] filteringAliases() { - return filteringAliases; - } - - long nowInMillis() { - return nowInMillis; - } - - @Override - public String[] indices() { - return originalIndices.indices(); - } - - @Override - public IndicesOptions indicesOptions() { - return originalIndices.indicesOptions(); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - source = in.readBytesReference(); - shardId = in.readVInt(); - types = in.readStringArray(); - int routingSize = in.readVInt(); - if (routingSize > 0) { - routing = new HashSet<>(routingSize); - for (int i = 0; i < routingSize; i++) { - routing.add(in.readString()); - } - } - int aliasesSize = in.readVInt(); - if (aliasesSize > 0) { - filteringAliases = new String[aliasesSize]; - for (int i = 0; i < aliasesSize; i++) { - filteringAliases[i] = in.readString(); - } - } - - nowInMillis = in.readVLong(); - originalIndices = OriginalIndices.readOriginalIndices(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeBytesReference(source); - out.writeVInt(shardId); - out.writeStringArray(types); - if (routing != null) { - out.writeVInt(routing.size()); - for (String r : routing) { - out.writeString(r); - } - } else { - out.writeVInt(0); - } - if (filteringAliases != null) { - out.writeVInt(filteringAliases.length); - for (String alias : filteringAliases) { - out.writeString(alias); - } - } else { - out.writeVInt(0); - } - out.writeVLong(nowInMillis); - OriginalIndices.writeOriginalIndices(originalIndices, out); - } - - @Override - public String toString() { - String sSource = "_na_"; - try { - sSource = XContentHelper.convertToJson(source, false); - } catch (Exception e) { - // ignore - } - return "delete_by_query {[" + index + "]" + Arrays.toString(types) + ", query [" + sSource + "]}"; - } -} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java b/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java deleted file mode 100644 index 0800a639a81..00000000000 --- 
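// The readFrom/writeTo pair in ShardDeleteByQueryRequest above follows the
// stream convention these request classes share: a nullable collection is
// length-prefixed with a vint, and null is encoded as a zero count, so the
// reader only allocates when the count is positive. A stripped-down sketch of
// that convention (class and method names are illustrative, not changeset code;
// uses java.util.HashSet/Set and org.elasticsearch.common.io.stream.*):
class NullableSetSketch {
    Set<String> routing; // may legitimately be null, like the field above

    void write(StreamOutput out) throws IOException {
        if (routing != null) {
            out.writeVInt(routing.size());
            for (String r : routing) {
                out.writeString(r);
            }
        } else {
            out.writeVInt(0); // null becomes a zero count on the wire
        }
    }

    void read(StreamInput in) throws IOException {
        int size = in.readVInt();
        if (size > 0) { // note: an empty set and null both round-trip to null
            routing = new HashSet<>(size);
            for (int i = 0; i < size; i++) {
                routing.add(in.readString());
            }
        }
    }
}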
a/src/main/java/org/elasticsearch/action/deletebyquery/TransportDeleteByQueryAction.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.deletebyquery; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.DestructiveOperations; -import org.elasticsearch.action.support.replication.TransportIndicesReplicationOperationAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.node.settings.NodeSettingsService; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicReferenceArray; - -/** - */ -public class TransportDeleteByQueryAction extends TransportIndicesReplicationOperationAction { - - private final DestructiveOperations destructiveOperations; - - @Inject - public TransportDeleteByQueryAction(Settings settings, ClusterService clusterService, TransportService transportService, - ThreadPool threadPool, TransportIndexDeleteByQueryAction indexDeleteByQueryAction, - NodeSettingsService nodeSettingsService, ActionFilters actionFilters) { - super(settings, DeleteByQueryAction.NAME, transportService, clusterService, threadPool, indexDeleteByQueryAction, actionFilters); - this.destructiveOperations = new DestructiveOperations(logger, settings, nodeSettingsService); - } - - @Override - protected void doExecute(DeleteByQueryRequest request, ActionListener listener) { - destructiveOperations.failDestructive(request.indices()); - super.doExecute(request, listener); - } - - @Override - protected Map> resolveRouting(ClusterState clusterState, DeleteByQueryRequest request) throws ElasticsearchException { - return clusterState.metaData().resolveSearchRouting(request.routing(), request.indices()); - } - - @Override - protected DeleteByQueryRequest newRequestInstance() { - return new DeleteByQueryRequest(); - } - - @Override - protected DeleteByQueryResponse newResponseInstance(DeleteByQueryRequest request, AtomicReferenceArray indexResponses) { - DeleteByQueryResponse response = new DeleteByQueryResponse(); - for (int i = 0; i < indexResponses.length(); i++) { - IndexDeleteByQueryResponse indexResponse = (IndexDeleteByQueryResponse) indexResponses.get(i); - if (indexResponse != null) { - 
response.getIndices().put(indexResponse.getIndex(), indexResponse); - } - } - return response; - } - - @Override - protected boolean accumulateExceptions() { - return false; - } - - @Override - protected ClusterBlockException checkGlobalBlock(ClusterState state, DeleteByQueryRequest replicationPingRequest) { - return state.blocks().globalBlockedException(ClusterBlockLevel.READ); - } - - @Override - protected ClusterBlockException checkRequestBlock(ClusterState state, DeleteByQueryRequest request, String[] concreteIndices) { - return state.blocks().indicesBlockedException(ClusterBlockLevel.WRITE, concreteIndices); - } - - @Override - protected IndexDeleteByQueryRequest newIndexRequestInstance(DeleteByQueryRequest request, String index, Set routing, long startTimeInMillis) { - String[] filteringAliases = clusterService.state().metaData().filteringAliases(index, request.indices()); - return new IndexDeleteByQueryRequest(request, index, routing, filteringAliases, startTimeInMillis); - } -} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/TransportIndexDeleteByQueryAction.java b/src/main/java/org/elasticsearch/action/deletebyquery/TransportIndexDeleteByQueryAction.java deleted file mode 100644 index 607459e7798..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/TransportIndexDeleteByQueryAction.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.deletebyquery; - -import org.elasticsearch.action.ActionWriteResponse; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.replication.TransportIndexReplicationOperationAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.threadpool.ThreadPool; - -import java.util.List; - -/** - * Internal transport action that broadcasts a delete by query request to all of the shards that belong to an index. 
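// The removed delete-by-query implementation fans out in three tiers:
// TransportDeleteByQueryAction (previous file) resolves routing per index, the
// index-level action below broadcasts to every shard group of one index, and
// the shard-level action (next file) executes on a primary and its replicas.
// The gather side of the indices tier, shown in newResponseInstance above,
// simply skips null slots. A generic sketch of that pattern; the type
// parameter R stands in for IndexDeleteByQueryResponse and the method is an
// illustration, not changeset code (uses java.util.ArrayList/List and
// java.util.concurrent.atomic.AtomicReferenceArray):
<R> List<R> gatherNonNull(AtomicReferenceArray<Object> slots) {
    List<R> responses = new ArrayList<>();
    for (int i = 0; i < slots.length(); i++) {
        Object v = slots.get(i);
        if (v != null) {            // a null slot means that index never answered
            responses.add((R) v);   // unchecked in this sketch
        }
    }
    return responses;
}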
- */ -public class TransportIndexDeleteByQueryAction extends TransportIndexReplicationOperationAction { - - private static final String ACTION_NAME = DeleteByQueryAction.NAME + "[index]"; - - @Inject - public TransportIndexDeleteByQueryAction(Settings settings, ClusterService clusterService, - ThreadPool threadPool, TransportShardDeleteByQueryAction shardDeleteByQueryAction, ActionFilters actionFilters) { - super(settings, ACTION_NAME, clusterService, threadPool, shardDeleteByQueryAction, actionFilters); - } - - @Override - protected IndexDeleteByQueryResponse newResponseInstance(IndexDeleteByQueryRequest request, List shardDeleteByQueryResponses, ActionWriteResponse.ShardInfo shardInfo) { - return new IndexDeleteByQueryResponse(request.index(), shardInfo); - } - - @Override - protected ClusterBlockException checkGlobalBlock(ClusterState state, IndexDeleteByQueryRequest request) { - return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE); - } - - @Override - protected ClusterBlockException checkRequestBlock(ClusterState state, IndexDeleteByQueryRequest request) { - return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.index()); - } - - @Override - protected GroupShardsIterator shards(IndexDeleteByQueryRequest request) { - return clusterService.operationRouting().deleteByQueryShards(clusterService.state(), request.index(), request.routing()); - } - - @Override - protected ShardDeleteByQueryRequest newShardRequestInstance(IndexDeleteByQueryRequest request, int shardId) { - return new ShardDeleteByQueryRequest(request, shardId); - } -} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java b/src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java deleted file mode 100644 index 6e364302e83..00000000000 --- a/src/main/java/org/elasticsearch/action/deletebyquery/TransportShardDeleteByQueryAction.java +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.deletebyquery; - -import org.elasticsearch.ElasticsearchIllegalStateException; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction; -import org.elasticsearch.cache.recycler.PageCacheRecycler; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.action.shard.ShardStateAction; -import org.elasticsearch.cluster.routing.GroupShardsIterator; -import org.elasticsearch.cluster.routing.ShardIterator; -import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.engine.Engine; -import org.elasticsearch.index.query.ParsedQuery; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.internal.DefaultSearchContext; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.ShardSearchLocalRequest; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; - -/** - * - */ -public class TransportShardDeleteByQueryAction extends TransportShardReplicationOperationAction { - - public final static String DELETE_BY_QUERY_API = "delete_by_query"; - - private static final String ACTION_NAME = DeleteByQueryAction.NAME + "[s]"; - - private final ScriptService scriptService; - private final PageCacheRecycler pageCacheRecycler; - private final BigArrays bigArrays; - - @Inject - public TransportShardDeleteByQueryAction(Settings settings, TransportService transportService, - ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, - ShardStateAction shardStateAction, ScriptService scriptService, - PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ActionFilters actionFilters) { - super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters); - this.scriptService = scriptService; - this.pageCacheRecycler = pageCacheRecycler; - this.bigArrays = bigArrays; - } - - @Override - protected boolean checkWriteConsistency() { - return true; - } - - @Override - protected String executor() { - return ThreadPool.Names.INDEX; - } - - @Override - protected ShardDeleteByQueryRequest newRequestInstance() { - return new ShardDeleteByQueryRequest(); - } - - @Override - protected ShardDeleteByQueryRequest newReplicaRequestInstance() { - return newRequestInstance(); - } - - @Override - protected ShardDeleteByQueryResponse newResponseInstance() { - return new ShardDeleteByQueryResponse(); - } - - @Override - protected boolean resolveIndex() { - return false; - } - - @Override - protected Tuple shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) { - ShardDeleteByQueryRequest request = shardRequest.request; - IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()); - IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id()); - - SearchContext.setCurrent(new DefaultSearchContext(0, new ShardSearchLocalRequest(request.types(), request.nowInMillis()), null, - indexShard.acquireSearcher(DELETE_BY_QUERY_API), indexService, 
indexShard, scriptService, - pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter())); - try { - Engine.DeleteByQuery deleteByQuery = indexShard.prepareDeleteByQuery(request.source(), request.filteringAliases(), Engine.Operation.Origin.PRIMARY, request.types()); - SearchContext.current().parsedQuery(new ParsedQuery(deleteByQuery.query())); - indexShard.deleteByQuery(deleteByQuery); - } finally { - try (SearchContext searchContext = SearchContext.current()) { - SearchContext.removeCurrent(); - } - } - return new Tuple<>(new ShardDeleteByQueryResponse(), shardRequest.request); - } - - - @Override - protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) { - ShardDeleteByQueryRequest request = shardRequest.request; - IndexService indexService = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()); - IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id()); - - SearchContext.setCurrent(new DefaultSearchContext(0, new ShardSearchLocalRequest(request.types(), request.nowInMillis()), null, - indexShard.acquireSearcher(DELETE_BY_QUERY_API, true), indexService, indexShard, scriptService, - pageCacheRecycler, bigArrays, threadPool.estimatedTimeInMillisCounter())); - try { - Engine.DeleteByQuery deleteByQuery = indexShard.prepareDeleteByQuery(request.source(), request.filteringAliases(), Engine.Operation.Origin.REPLICA, request.types()); - SearchContext.current().parsedQuery(new ParsedQuery(deleteByQuery.query())); - indexShard.deleteByQuery(deleteByQuery); - } finally { - try (SearchContext searchContext = SearchContext.current()) { - SearchContext.removeCurrent(); - } - } - } - - @Override - protected ShardIterator shards(ClusterState clusterState, InternalRequest request) { - GroupShardsIterator group = clusterService.operationRouting().deleteByQueryShards(clusterService.state(), request.concreteIndex(), request.request().routing()); - for (ShardIterator shardIt : group) { - if (shardIt.shardId().id() == request.request().shardId()) { - return shardIt; - } - } - throw new ElasticsearchIllegalStateException("No shards iterator found for shard [" + request.request().shardId() + "]"); - } -} diff --git a/src/main/java/org/elasticsearch/action/exists/TransportExistsAction.java b/src/main/java/org/elasticsearch/action/exists/TransportExistsAction.java index 9c9dbdb8a8b..a440d4a6eb1 100644 --- a/src/main/java/org/elasticsearch/action/exists/TransportExistsAction.java +++ b/src/main/java/org/elasticsearch/action/exists/TransportExistsAction.java @@ -65,18 +65,16 @@ import static org.elasticsearch.action.exists.ExistsRequest.DEFAULT_MIN_SCORE; public class TransportExistsAction extends TransportBroadcastOperationAction { private final IndicesService indicesService; - private final ScriptService scriptService; - private final PageCacheRecycler pageCacheRecycler; - private final BigArrays bigArrays; @Inject public TransportExistsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService, ScriptService scriptService, PageCacheRecycler pageCacheRecycler, BigArrays bigArrays, ActionFilters actionFilters) { - super(settings, ExistsAction.NAME, threadPool, clusterService, transportService, actionFilters); + super(settings, ExistsAction.NAME, threadPool, clusterService, transportService, actionFilters, + ExistsRequest.class, ShardExistsRequest.class, ThreadPool.Names.SEARCH); this.indicesService = indicesService; this.scriptService = scriptService; 
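// Note on the shard-level class that just ended: shardOperationOnPrimary and
// shardOperationOnReplica run the identical engine operation and differ only
// in Engine.Operation.Origin -- delete-by-query replicates the query itself,
// not the documents it happened to delete on the primary. The shared step,
// extracted as a sketch (assumes indexShard, request and origin in scope):
//
//     Engine.DeleteByQuery deleteByQuery = indexShard.prepareDeleteByQuery(
//             request.source(), request.filteringAliases(), origin, request.types());
//     SearchContext.current().parsedQuery(new ParsedQuery(deleteByQuery.query()));
//     indexShard.deleteByQuery(deleteByQuery);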
        this.pageCacheRecycler = pageCacheRecycler;
@@ -89,21 +87,6 @@ public class TransportExistsAction extends TransportBroadcastOperationAction
diff --git a/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java b/src/main/java/org/elasticsearch/action/fieldstats/FieldStats.java
new file mode 100644
+public abstract class FieldStats<T extends Comparable<T>> implements Streamable, ToXContent {
+
+    private byte type;
+    private long maxDoc;
+    private long docCount;
+    private long sumDocFreq;
+    private long sumTotalTermFreq;
+    protected T minValue;
+    protected T maxValue;
+
+    protected FieldStats() {
+    }
+
+    protected FieldStats(int type, long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq) {
+        this.type = (byte) type;
+        this.maxDoc = maxDoc;
+        this.docCount = docCount;
+        this.sumDocFreq = sumDocFreq;
+        this.sumTotalTermFreq = sumTotalTermFreq;
+    }
+
+    byte getType() {
+        return type;
+    }
+
+    /**
+     * @return the total number of documents.
+     *
+     * Note that documents marked as deleted that haven't yet been merged away aren't taken into account.
+     */
+    public long getMaxDoc() {
+        return maxDoc;
+    }
+
+    /**
+     * @return the number of documents that have at least one term for this field, or -1 if this measurement isn't available.
+     *
+     * Note that documents marked as deleted that haven't yet been merged away aren't taken into account.
+     */
+    public long getDocCount() {
+        return docCount;
+    }
+
+    /**
+     * @return the percentage of documents that have at least one value for this field.
+     *
+     * This is a derived statistic and is based on 'doc_count / max_doc'.
+     */
+    public int getDensity() {
+        if (docCount < 0 || maxDoc <= 0) {
+            return -1;
+        }
+        return (int) (docCount * 100 / maxDoc);
+    }
+
+    /**
+     * @return the sum of each term's document frequency in this field, or -1 if this measurement isn't available.
+     * Document frequency is the number of documents containing a particular term.
+     *
+     * Note that documents marked as deleted that haven't yet been merged away aren't taken into account.
+     */
+    public long getSumDocFreq() {
+        return sumDocFreq;
+    }
+
+    /**
+     * @return the sum of the term frequencies of all terms in this field across all documents, or -1 if this measurement
+     * isn't available. Term frequency is the total number of occurrences of a term in a particular document and field.
+     *
+     * Note that documents marked as deleted that haven't yet been merged away aren't taken into account.
+     */
+    public long getSumTotalTermFreq() {
+        return sumTotalTermFreq;
+    }
+
+    /**
+     * @return the lowest value in the field represented as a string.
+     *
+     * Note that documents marked as deleted that haven't yet been merged away aren't taken into account.
+     */
+    public abstract String getMinValue();
+
+    /**
+     * @return the highest value in the field represented as a string.
+     *
+     * Note that documents marked as deleted that haven't yet been merged away aren't taken into account.
+     */
+    public abstract String getMaxValue();
+
+    /**
+     * Merges the provided stats into this stats instance.
+ */ + public void append(FieldStats stats) { + this.maxDoc += stats.maxDoc; + if (stats.docCount == -1) { + this.docCount = -1; + } else if (this.docCount != -1) { + this.docCount += stats.docCount; + } + if (stats.sumDocFreq == -1) { + this.sumDocFreq = -1; + } else if (this.sumDocFreq != -1) { + this.sumDocFreq += stats.sumDocFreq; + } + if (stats.sumTotalTermFreq == -1) { + this.sumTotalTermFreq = -1; + } else if (this.sumTotalTermFreq != -1) { + this.sumTotalTermFreq += stats.sumTotalTermFreq; + } + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field(Fields.MAX_DOC, maxDoc); + builder.field(Fields.DOC_COUNT, docCount); + builder.field(Fields.DENSITY, getDensity()); + builder.field(Fields.SUM_DOC_FREQ, sumDocFreq); + builder.field(Fields.SUM_TOTAL_TERM_FREQ, sumTotalTermFreq); + toInnerXContent(builder); + builder.endObject(); + return builder; + } + + protected void toInnerXContent(XContentBuilder builder) throws IOException { + builder.field(Fields.MIN_VALUE, minValue); + builder.field(Fields.MAX_VALUE, maxValue); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + maxDoc = in.readVLong(); + docCount = in.readLong(); + sumDocFreq = in.readLong(); + sumTotalTermFreq = in.readLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeByte(type); + out.writeVLong(maxDoc); + out.writeLong(docCount); + out.writeLong(sumDocFreq); + out.writeLong(sumTotalTermFreq); + } + + public static class Long extends FieldStats { + + public Long() { + } + + public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, long minValue, long maxValue) { + this(0, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, minValue, maxValue); + } + + protected Long(int type, long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, long minValue, long maxValue) { + super(type, maxDoc, docCount, sumDocFreq, sumTotalTermFreq); + this.minValue = minValue; + this.maxValue = maxValue; + } + + @Override + public String getMinValue() { + return String.valueOf(minValue.longValue()); + } + + @Override + public String getMaxValue() { + return String.valueOf(maxValue.longValue()); + } + + @Override + public void append(FieldStats stats) { + super.append(stats); + Long other = (Long) stats; + this.minValue = Math.min(other.minValue, minValue); + this.maxValue = Math.max(other.maxValue, maxValue); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + minValue = in.readLong(); + maxValue = in.readLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeLong(minValue); + out.writeLong(maxValue); + } + + } + + public static final class Float extends FieldStats { + + public Float() { + } + + public Float(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, float minValue, float maxValue) { + super(1, maxDoc, docCount, sumDocFreq, sumTotalTermFreq); + this.minValue = minValue; + this.maxValue = maxValue; + } + + @Override + public String getMinValue() { + return String.valueOf(minValue.floatValue()); + } + + @Override + public String getMaxValue() { + return String.valueOf(maxValue.floatValue()); + } + + @Override + public void append(FieldStats stats) { + super.append(stats); + Float other = (Float) stats; + this.minValue = Math.min(other.minValue, minValue); + this.maxValue = Math.max(other.maxValue, maxValue); + } + 
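// Worked example of the append() merge semantics above -- a sketch, not part
// of this changeset; the two stats are hypothetical per-shard values for a
// long field:
static void appendExample() {
    FieldStats.Long a = new FieldStats.Long(100, 80, 200, 300, 3, 90);  // shard A
    FieldStats.Long b = new FieldStats.Long(50, -1, 100, 150, 1, 120);  // shard B
    a.append(b);
    // a.getMaxDoc()           == 150   (counters add up)
    // a.getDocCount()         == -1    (-1 "not available" is sticky)
    // a.getSumDocFreq()       == 300   (200 + 100)
    // a.getSumTotalTermFreq() == 450   (300 + 150)
    // a.getMinValue()/getMaxValue() -> "1" / "120" (extremes win)
    // a.getDensity()          == -1    (derived from docCount, so unavailable too)
}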
+ @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + minValue = in.readFloat(); + maxValue = in.readFloat(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeFloat(minValue); + out.writeFloat(maxValue); + } + + } + + public static final class Double extends FieldStats { + + public Double() { + } + + public Double(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, double minValue, double maxValue) { + super(2, maxDoc, docCount, sumDocFreq, sumTotalTermFreq); + this.minValue = minValue; + this.maxValue = maxValue; + } + + @Override + public String getMinValue() { + return String.valueOf(minValue.doubleValue()); + } + + @Override + public String getMaxValue() { + return String.valueOf(maxValue.doubleValue()); + } + + @Override + public void append(FieldStats stats) { + super.append(stats); + Double other = (Double) stats; + this.minValue = Math.min(other.minValue, minValue); + this.maxValue = Math.max(other.maxValue, maxValue); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + minValue = in.readDouble(); + maxValue = in.readDouble(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeDouble(minValue); + out.writeDouble(maxValue); + } + + } + + public static final class Text extends FieldStats { + + public Text() { + } + + public Text(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, BytesRef minValue, BytesRef maxValue) { + super(3, maxDoc, docCount, sumDocFreq, sumTotalTermFreq); + this.minValue = minValue; + this.maxValue = maxValue; + } + + @Override + public String getMinValue() { + return minValue.utf8ToString(); + } + + @Override + public String getMaxValue() { + return maxValue.utf8ToString(); + } + + @Override + public void append(FieldStats stats) { + super.append(stats); + Text other = (Text) stats; + if (other.minValue.compareTo(minValue) < 0) { + minValue = other.minValue; + } + if (other.maxValue.compareTo(maxValue) > 0) { + maxValue = other.maxValue; + } + } + + @Override + protected void toInnerXContent(XContentBuilder builder) throws IOException { + builder.field(Fields.MIN_VALUE, getMinValue()); + builder.field(Fields.MAX_VALUE, getMaxValue()); + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + minValue = in.readBytesRef(); + maxValue = in.readBytesRef(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeBytesRef(minValue); + out.writeBytesRef(maxValue); + } + + } + + public static final class Date extends Long { + + private FormatDateTimeFormatter dateFormatter; + + public Date() { + } + + public Date(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, long minValue, long maxValue, FormatDateTimeFormatter dateFormatter) { + super(4, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, minValue, maxValue); + this.dateFormatter = dateFormatter; + } + + @Override + public String getMinValue() { + return dateFormatter.printer().print(minValue); + } + + @Override + public String getMaxValue() { + return dateFormatter.printer().print(maxValue); + } + + @Override + protected void toInnerXContent(XContentBuilder builder) throws IOException { + builder.field(Fields.MIN_VALUE, getMinValue()); + builder.field(Fields.MAX_VALUE, getMaxValue()); + } + + @Override + public void readFrom(StreamInput in) throws 
IOException { + super.readFrom(in); + dateFormatter = Joda.forPattern(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(dateFormatter.format()); + } + + } + + public static FieldStats read(StreamInput in) throws IOException { + FieldStats stats; + byte type = in.readByte(); + switch (type) { + case 0: + stats = new Long(); + break; + case 1: + stats = new Float(); + break; + case 2: + stats = new Double(); + break; + case 3: + stats = new Text(); + break; + case 4: + stats = new Date(); + break; + default: + throw new IllegalArgumentException("Illegal type [" + type + "]"); + } + stats.type = type; + stats.readFrom(in); + return stats; + } + + private final static class Fields { + + final static XContentBuilderString MAX_DOC = new XContentBuilderString("max_doc"); + final static XContentBuilderString DOC_COUNT = new XContentBuilderString("doc_count"); + final static XContentBuilderString DENSITY = new XContentBuilderString("density"); + final static XContentBuilderString SUM_DOC_FREQ = new XContentBuilderString("sum_doc_freq"); + final static XContentBuilderString SUM_TOTAL_TERM_FREQ = new XContentBuilderString("sum_total_term_freq"); + final static XContentBuilderString MIN_VALUE = new XContentBuilderString("min_value"); + final static XContentBuilderString MAX_VALUE = new XContentBuilderString("max_value"); + + } + +} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryAction.java b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsAction.java similarity index 61% rename from src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryAction.java rename to src/main/java/org/elasticsearch/action/fieldstats/FieldStatsAction.java index 8a35aef6818..fb4a3f77833 100644 --- a/src/main/java/org/elasticsearch/action/deletebyquery/DeleteByQueryAction.java +++ b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsAction.java @@ -17,29 +17,29 @@ * under the License. */ -package org.elasticsearch.action.deletebyquery; +package org.elasticsearch.action.fieldstats; import org.elasticsearch.action.ClientAction; import org.elasticsearch.client.Client; /** */ -public class DeleteByQueryAction extends ClientAction { +public class FieldStatsAction extends ClientAction { - public static final DeleteByQueryAction INSTANCE = new DeleteByQueryAction(); - public static final String NAME = "indices:data/write/delete/by_query"; + public static final FieldStatsAction INSTANCE = new FieldStatsAction(); + public static final String NAME = "indices:data/read/field_stats"; - private DeleteByQueryAction() { + private FieldStatsAction() { super(NAME); } @Override - public DeleteByQueryResponse newResponse() { - return new DeleteByQueryResponse(); + public FieldStatsResponse newResponse() { + return new FieldStatsResponse(); } @Override - public DeleteByQueryRequestBuilder newRequestBuilder(Client client) { - return new DeleteByQueryRequestBuilder(client); + public FieldStatsRequestBuilder newRequestBuilder(Client client) { + return new FieldStatsRequestBuilder(client); } } diff --git a/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java new file mode 100644 index 00000000000..ff61fe88ee9 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequest.java @@ -0,0 +1,77 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.fieldstats; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.ValidateActions; +import org.elasticsearch.action.support.broadcast.BroadcastOperationRequest; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + */ +public class FieldStatsRequest extends BroadcastOperationRequest { + + public final static String DEFAULT_LEVEL = "cluster"; + + private String[] fields; + private String level = DEFAULT_LEVEL; + + public String[] fields() { + return fields; + } + + public void fields(String[] fields) { + this.fields = fields; + } + + public String level() { + return level; + } + + public void level(String level) { + this.level = level; + } + + @Override + public ActionRequestValidationException validate() { + ActionRequestValidationException validationException = super.validate(); + if ("cluster".equals(level) == false && "indices".equals(level) == false) { + validationException = ValidateActions.addValidationError("invalid level option [" + level + "]", validationException); + } + return validationException; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + fields = in.readStringArray(); + level = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeStringArrayNullable(fields); + out.writeString(level); + } +} diff --git a/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequestBuilder.java b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequestBuilder.java new file mode 100644 index 00000000000..4c8d0b6c73b --- /dev/null +++ b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsRequestBuilder.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.fieldstats; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder; +import org.elasticsearch.client.Client; + +/** + */ +public class FieldStatsRequestBuilder extends BroadcastOperationRequestBuilder { + + public FieldStatsRequestBuilder(Client client) { + super(client, new FieldStatsRequest()); + } + + public FieldStatsRequestBuilder setFields(String... fields) { + request().fields(fields); + return this; + } + + public FieldStatsRequestBuilder setLevel(String level) { + request().level(level); + return this; + } + + @Override + protected void doExecute(ActionListener listener) { + client.fieldStats(request, listener); + } +} diff --git a/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsResponse.java b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsResponse.java new file mode 100644 index 00000000000..e6f69e9791a --- /dev/null +++ b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsResponse.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.fieldstats; + +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + */ +public class FieldStatsResponse extends BroadcastOperationResponse { + + private Map> indicesMergedFieldStats; + + public FieldStatsResponse() { + } + + public FieldStatsResponse(int totalShards, int successfulShards, int failedShards, List shardFailures, Map> indicesMergedFieldStats) { + super(totalShards, successfulShards, failedShards, shardFailures); + this.indicesMergedFieldStats = indicesMergedFieldStats; + } + + @Nullable + public Map getAllFieldStats() { + return indicesMergedFieldStats.get("_all"); + } + + public Map> getIndicesMergedFieldStats() { + return indicesMergedFieldStats; + } + + @Override + public void readFrom(StreamInput in) throws IOException { + super.readFrom(in); + final int size = in.readVInt(); + indicesMergedFieldStats = new HashMap<>(size); + for (int i = 0; i < size; i++) { + String key = in.readString(); + int indexSize = in.readVInt(); + Map indexFieldStats = new HashMap<>(indexSize); + indicesMergedFieldStats.put(key, indexFieldStats); + for (int j = 0; j < indexSize; j++) { + key = in.readString(); + FieldStats value = FieldStats.read(in); + indexFieldStats.put(key, value); + } + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeVInt(indicesMergedFieldStats.size()); + for (Map.Entry> entry1 : indicesMergedFieldStats.entrySet()) { + out.writeString(entry1.getKey()); + out.writeVInt(entry1.getValue().size()); + for (Map.Entry entry2 : entry1.getValue().entrySet()) { + out.writeString(entry2.getKey()); + entry2.getValue().writeTo(out); + } + } + } +} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryResponse.java b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardRequest.java similarity index 65% rename from src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryResponse.java rename to src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardRequest.java index 2c8d4001bae..fb46ff66d3b 100644 --- a/src/main/java/org/elasticsearch/action/deletebyquery/IndexDeleteByQueryResponse.java +++ b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardRequest.java @@ -17,45 +17,43 @@ * under the License. */ -package org.elasticsearch.action.deletebyquery; +package org.elasticsearch.action.fieldstats; -import org.elasticsearch.action.ActionWriteResponse; +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; import java.io.IOException; /** - * Delete by query response executed on a specific index. 
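// How the new field-stats pieces fit together from a caller's perspective --
// a sketch assuming a connected Client instance; the field name is
// illustrative only:
//
//     FieldStatsResponse response = new FieldStatsRequestBuilder(client)
//             .setFields("creation_date")
//             .setLevel("cluster")   // merged across indices under the "_all" key
//             .get();
//     FieldStats stats = response.getAllFieldStats().get("creation_date");
//     String oldest = stats.getMinValue();
//
// With setLevel("indices") the same stats are instead keyed per concrete
// index via response.getIndicesMergedFieldStats().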
*/ -public class IndexDeleteByQueryResponse extends ActionWriteResponse { +public class FieldStatsShardRequest extends BroadcastShardOperationRequest { - private String index; + private String[] fields; - IndexDeleteByQueryResponse(String index, ShardInfo failures) { - this.index = index; - setShardInfo(failures); + public FieldStatsShardRequest() { } - IndexDeleteByQueryResponse() { + public FieldStatsShardRequest(ShardId shardId, FieldStatsRequest request) { + super(shardId, request); + this.fields = request.fields(); } - /** - * The index the delete by query operation was executed against. - */ - public String getIndex() { - return this.index; + public String[] getFields() { + return fields; } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - index = in.readString(); + fields = in.readStringArray(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeString(index); + out.writeStringArrayNullable(fields); } -} \ No newline at end of file + +} diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownResponse.java b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java similarity index 52% rename from src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownResponse.java rename to src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java index 7375038ddb6..ada4552e94c 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownResponse.java +++ b/src/main/java/org/elasticsearch/action/fieldstats/FieldStatsShardResponse.java @@ -17,57 +17,55 @@ * under the License. */ -package org.elasticsearch.action.admin.cluster.node.shutdown; +package org.elasticsearch.action.fieldstats; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; /** - * */ -public class NodesShutdownResponse extends ActionResponse { +public class FieldStatsShardResponse extends BroadcastShardOperationResponse { - private ClusterName clusterName; - private DiscoveryNode[] nodes; + private Map fieldStats; - NodesShutdownResponse() { + public FieldStatsShardResponse() { } - public NodesShutdownResponse(ClusterName clusterName, DiscoveryNode[] nodes) { - this.clusterName = clusterName; - this.nodes = nodes; + public FieldStatsShardResponse(ShardId shardId, Map fieldStats) { + super(shardId); + this.fieldStats = fieldStats; } - public ClusterName getClusterName() { - return this.clusterName; + public Map getFieldStats() { + return fieldStats; } - public DiscoveryNode[] getNodes() { - return this.nodes; - } @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - clusterName = ClusterName.readClusterName(in); - nodes = new DiscoveryNode[in.readVInt()]; - for (int i = 0; i < nodes.length; i++) { - nodes[i] = DiscoveryNode.readNode(in); + final int size = in.readVInt(); + fieldStats = new HashMap<>(size); + for (int i = 0; i < size; i++) { + String key = in.readString(); + FieldStats value = FieldStats.read(in); + fieldStats.put(key, value); } } @Override public void 
writeTo(StreamOutput out) throws IOException { super.writeTo(out); - clusterName.writeTo(out); - out.writeVInt(nodes.length); - for (DiscoveryNode node : nodes) { - node.writeTo(out); + out.writeVInt(fieldStats.size()); + for (Map.Entry entry : fieldStats.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); } } } diff --git a/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java b/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java new file mode 100644 index 00000000000..2816a655bb7 --- /dev/null +++ b/src/main/java/org/elasticsearch/action/fieldstats/TransportFieldStatsTransportAction.java @@ -0,0 +1,167 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.fieldstats; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.Terms; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ShardOperationFailedException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.TransportBroadcastOperationAction; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.routing.GroupShardsIterator; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.mapper.FieldMappers; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.*; +import java.util.concurrent.atomic.AtomicReferenceArray; + +public class TransportFieldStatsTransportAction extends TransportBroadcastOperationAction { + + private final IndicesService indicesService; + + @Inject + public TransportFieldStatsTransportAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, IndicesService indicesService) { + 
super(settings, FieldStatsAction.NAME, threadPool, clusterService, transportService, actionFilters,
+                FieldStatsRequest.class, FieldStatsShardRequest.class, ThreadPool.Names.MANAGEMENT);
+        this.indicesService = indicesService;
+    }
+
+    @Override
+    protected FieldStatsResponse newResponse(FieldStatsRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
+        int successfulShards = 0;
+        int failedShards = 0;
+        Map<String, Map<String, FieldStats>> indicesMergedFieldStats = new HashMap<>();
+        List<ShardOperationFailedException> shardFailures = new ArrayList<>();
+        for (int i = 0; i < shardsResponses.length(); i++) {
+            Object shardValue = shardsResponses.get(i);
+            if (shardValue == null) {
+                // simply ignore non-active shards
+            } else if (shardValue instanceof BroadcastShardOperationFailedException) {
+                failedShards++;
+                shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardValue));
+            } else {
+                successfulShards++;
+                FieldStatsShardResponse shardResponse = (FieldStatsShardResponse) shardValue;
+
+                final String indexName;
+                if ("cluster".equals(request.level())) {
+                    indexName = "_all";
+                } else if ("indices".equals(request.level())) {
+                    indexName = shardResponse.getIndex();
+                } else {
+                    // should already have been caught by FieldStatsRequest#validate(...)
+                    throw new IllegalArgumentException("Illegal level option [" + request.level() + "]");
+                }
+
+                Map<String, FieldStats> indexMergedFieldStats = indicesMergedFieldStats.get(indexName);
+                if (indexMergedFieldStats == null) {
+                    indicesMergedFieldStats.put(indexName, indexMergedFieldStats = new HashMap<>());
+                }
+
+                Map<String, FieldStats> fieldStats = shardResponse.getFieldStats();
+                for (Map.Entry<String, FieldStats> entry : fieldStats.entrySet()) {
+                    FieldStats existing = indexMergedFieldStats.get(entry.getKey());
+                    if (existing != null) {
+                        if (existing.getType() != entry.getValue().getType()) {
+                            throw new IllegalStateException(
+                                    "trying to merge the field stats of field [" + entry.getKey() + "] from index [" + shardResponse.getIndex() + "] but the field type is incompatible, try to set the 'level' option to 'indices'"
+                            );
+                        }
+
+                        existing.append(entry.getValue());
+                    } else {
+                        indexMergedFieldStats.put(entry.getKey(), entry.getValue());
+                    }
+                }
+            }
+        }
+        return new FieldStatsResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures, indicesMergedFieldStats);
+    }
+
+    @Override
+    protected FieldStatsShardRequest newShardRequest(int numShards, ShardRouting shard, FieldStatsRequest request) {
+        return new FieldStatsShardRequest(shard.shardId(), request);
+    }
+
+    @Override
+    protected FieldStatsShardResponse newShardResponse() {
+        return new FieldStatsShardResponse();
+    }
+
+    @Override
+    protected FieldStatsShardResponse shardOperation(FieldStatsShardRequest request) {
+        ShardId shardId = request.shardId();
+        Map<String, FieldStats> fieldStats = new HashMap<>();
+        IndexService indexServices = indicesService.indexServiceSafe(shardId.getIndex());
+        MapperService mapperService = indexServices.mapperService();
+        IndexShard shard = indexServices.shardSafe(shardId.id());
+        shard.readAllowed();
+        try (Engine.Searcher searcher = shard.acquireSearcher("fieldstats")) {
+            for (String field : request.getFields()) {
+                FieldMappers fieldMappers = mapperService.fullName(field);
+                if (fieldMappers != null) {
+                    IndexReader reader = searcher.reader();
+                    Terms terms = MultiFields.getTerms(reader, field);
+                    if (terms != null) {
+                        fieldStats.put(field, fieldMappers.mapper().stats(terms, reader.maxDoc()));
+                    }
+                } else {
+                    throw new IllegalArgumentException("field [" + field + "] doesn't exist");
+                }
+            }
+        }
catch (IOException e) { + throw ExceptionsHelper.convertToElastic(e); + } + return new FieldStatsShardResponse(shardId, fieldStats); + } + + @Override + protected GroupShardsIterator shards(ClusterState clusterState, FieldStatsRequest request, String[] concreteIndices) { + return clusterService.operationRouting().searchShards(clusterState, request.indices(), concreteIndices, null, null); + } + + @Override + protected ClusterBlockException checkGlobalBlock(ClusterState state, FieldStatsRequest request) { + return state.blocks().globalBlockedException(ClusterBlockLevel.READ); + } + + @Override + protected ClusterBlockException checkRequestBlock(ClusterState state, FieldStatsRequest request, String[] concreteIndices) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices); + } +} diff --git a/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index 74e76253ca2..67c6141273c 100644 --- a/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.get; import com.google.common.collect.Iterators; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.*; import org.elasticsearch.action.support.IndicesOptions; @@ -376,7 +375,7 @@ public class MultiGetRequest extends ActionRequest implements I XContentParser.Token token; while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("docs array element should include an object"); + throw new IllegalArgumentException("docs array element should include an object"); } String index = defaultIndex; String type = defaultType; @@ -395,7 +394,7 @@ public class MultiGetRequest extends ActionRequest implements I } else if (token.isValue()) { if ("_index".equals(currentFieldName)) { if (!allowExplicitIndex) { - throw new ElasticsearchIllegalArgumentException("explicit index in multi get is not allowed"); + throw new IllegalArgumentException("explicit index in multi get is not allowed"); } index = parser.text(); } else if ("_type".equals(currentFieldName)) { @@ -486,7 +485,7 @@ public class MultiGetRequest extends ActionRequest implements I XContentParser.Token token; while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (!token.isValue()) { - throw new ElasticsearchIllegalArgumentException("ids array element should only contain ids"); + throw new IllegalArgumentException("ids array element should only contain ids"); } items.add(new Item(defaultIndex, defaultType, parser.text()).fields(defaultFields).fetchSourceContext(defaultFetchSource).routing(defaultRouting)); } diff --git a/src/main/java/org/elasticsearch/action/get/TransportGetAction.java b/src/main/java/org/elasticsearch/action/get/TransportGetAction.java index 39626a18b07..939f5c3b27c 100644 --- a/src/main/java/org/elasticsearch/action/get/TransportGetAction.java +++ b/src/main/java/org/elasticsearch/action/get/TransportGetAction.java @@ -49,17 +49,13 @@ public class TransportGetAction extends TransportShardSingleOperationAction listener) { ClusterState clusterState = clusterService.state(); diff --git a/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java 
b/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java index dc93cd935b1..fb6bac8cdc8 100644 --- a/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java +++ b/src/main/java/org/elasticsearch/action/get/TransportShardMultiGetAction.java @@ -48,7 +48,8 @@ public class TransportShardMultiGetAction extends TransportShardSingleOperationA @Inject public TransportShardMultiGetAction(Settings settings, ClusterService clusterService, TransportService transportService, IndicesService indicesService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters); + super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, + MultiGetShardRequest.class, ThreadPool.Names.GET); this.indicesService = indicesService; this.realtime = settings.getAsBoolean("action.get.realtime", true); @@ -59,16 +60,6 @@ public class TransportShardMultiGetAction extends TransportShardSingleOperationA return true; } - @Override - protected String executor() { - return ThreadPool.Names.GET; - } - - @Override - protected MultiGetShardRequest newRequest() { - return new MultiGetShardRequest(); - } - @Override protected MultiGetShardResponse newResponse() { return new MultiGetShardResponse(); @@ -93,7 +84,7 @@ public class TransportShardMultiGetAction extends TransportShardSingleOperationA } @Override - protected MultiGetShardResponse shardOperation(MultiGetShardRequest request, ShardId shardId) throws ElasticsearchException { + protected MultiGetShardResponse shardOperation(MultiGetShardRequest request, ShardId shardId) { IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); IndexShard indexShard = indexService.shardSafe(shardId.id()); diff --git a/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/src/main/java/org/elasticsearch/action/index/IndexRequest.java index 5dcd790a422..1f766b7a174 100644 --- a/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -22,7 +22,6 @@ package org.elasticsearch.action.index; import com.google.common.base.Charsets; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.*; import org.elasticsearch.action.support.replication.ShardReplicationOperationRequest; @@ -109,11 +108,11 @@ public class IndexRequest extends ShardReplicationOperationRequest } else if (id == 1) { return CREATE; } else { - throw new ElasticsearchIllegalArgumentException("No type match for [" + id + "]"); + throw new IllegalArgumentException("No type match for [" + id + "]"); } } - public static OpType fromString(String sOpType) throws ElasticsearchIllegalArgumentException { + public static OpType fromString(String sOpType) { String lowersOpType = sOpType.toLowerCase(Locale.ROOT); switch(lowersOpType){ case "create": @@ -121,7 +120,7 @@ public class IndexRequest extends ShardReplicationOperationRequest case "index": return OpType.INDEX; default: - throw new ElasticsearchIllegalArgumentException("opType [" + sOpType + "] not allowed, either [index] or [create] are allowed"); + throw new IllegalArgumentException("opType [" + sOpType + "] not allowed, either [index] or [create] are allowed"); } } @@ -326,7 +325,7 @@ public class IndexRequest extends 
ShardReplicationOperationRequest return this; } if (ttl <= 0) { - throw new ElasticsearchIllegalArgumentException("TTL value must be > 0. Illegal value provided [" + ttl + "]"); + throw new IllegalArgumentException("TTL value must be > 0. Illegal value provided [" + ttl + "]"); } this.ttl = ttl; return this; @@ -490,7 +489,7 @@ public class IndexRequest extends ShardReplicationOperationRequest * Sets a string representation of the {@link #opType(org.elasticsearch.action.index.IndexRequest.OpType)}. Can * be either "index" or "create". */ - public IndexRequest opType(String opType) throws ElasticsearchIllegalArgumentException { + public IndexRequest opType(String opType) { return opType(OpType.fromString(opType)); } @@ -559,7 +558,7 @@ public class IndexRequest extends ShardReplicationOperationRequest return this.autoGeneratedId; } - public void process(MetaData metaData, @Nullable MappingMetaData mappingMd, boolean allowIdGeneration, String concreteIndex) throws ElasticsearchException { + public void process(MetaData metaData, @Nullable MappingMetaData mappingMd, boolean allowIdGeneration, String concreteIndex) { // resolve the routing if needed routing(metaData.resolveIndexRouting(routing, index)); // resolve timestamp if provided externally @@ -608,11 +607,11 @@ public class IndexRequest extends ShardReplicationOperationRequest } if (parent != null && !mappingMd.hasParentField()) { - throw new ElasticsearchIllegalArgumentException("Can't specify parent if no parent field has been configured"); + throw new IllegalArgumentException("Can't specify parent if no parent field has been configured"); } } else { if (parent != null) { - throw new ElasticsearchIllegalArgumentException("Can't specify parent if no parent field has been configured"); + throw new IllegalArgumentException("Can't specify parent if no parent field has been configured"); } } diff --git a/src/main/java/org/elasticsearch/action/index/IndexResponse.java b/src/main/java/org/elasticsearch/action/index/IndexResponse.java index 0074d87b563..5727b2b673b 100644 --- a/src/main/java/org/elasticsearch/action/index/IndexResponse.java +++ b/src/main/java/org/elasticsearch/action/index/IndexResponse.java @@ -105,4 +105,17 @@ public class IndexResponse extends ActionWriteResponse { out.writeLong(version); out.writeBoolean(created); } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append("IndexResponse["); + builder.append("index=").append(index); + builder.append(",type=").append(type); + builder.append(",id=").append(id); + builder.append(",version=").append(version); + builder.append(",created=").append(created); + builder.append(",shards=").append(getShardInfo()); + return builder.append("]").toString(); + } } diff --git a/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java b/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java index 79ea496c317..8e81009b653 100644 --- a/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java +++ b/src/main/java/org/elasticsearch/action/index/TransportIndexAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction; +import org.elasticsearch.action.index.IndexRequest.OpType; import org.elasticsearch.action.support.ActionFilters; import 
org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.replication.TransportShardReplicationOperationAction; @@ -42,9 +43,11 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.index.mapper.SourceToParse; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndexAlreadyExistsException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.river.RiverIndexName; @@ -64,22 +67,23 @@ import org.elasticsearch.transport.TransportService; public class TransportIndexAction extends TransportShardReplicationOperationAction { private final AutoCreateIndex autoCreateIndex; - private final boolean allowIdGeneration; - private final TransportCreateIndexAction createIndexAction; - private final MappingUpdatedAction mappingUpdatedAction; + private final ClusterService clusterService; + @Inject public TransportIndexAction(Settings settings, TransportService transportService, ClusterService clusterService, IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction, TransportCreateIndexAction createIndexAction, MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters) { - super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters); + super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, actionFilters, + IndexRequest.class, IndexRequest.class, ThreadPool.Names.INDEX); this.createIndexAction = createIndexAction; this.mappingUpdatedAction = mappingUpdatedAction; this.autoCreateIndex = new AutoCreateIndex(settings); this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true); + this.clusterService = clusterService; } @Override @@ -141,49 +145,17 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi return true; } - @Override - protected IndexRequest newRequestInstance() { - return new IndexRequest(); - } - - @Override - protected IndexRequest newReplicaRequestInstance() { - return newRequestInstance(); - } - @Override protected IndexResponse newResponseInstance() { return new IndexResponse(); } - @Override - protected String executor() { - return ThreadPool.Names.INDEX; - } - @Override protected ShardIterator shards(ClusterState clusterState, InternalRequest request) { return clusterService.operationRouting() .indexShards(clusterService.state(), request.concreteIndex(), request.request().type(), request.request().id(), request.request().routing()); } - private void applyMappingUpdate(IndexService indexService, String type, Mapping update) throws Throwable { - // HACK: Rivers seem to have something specific that triggers potential - // deadlocks when doing concurrent indexing. 
So for now they keep the - // old behaviour of updating mappings locally first and then - // asynchronously notifying the master - // this can go away when rivers are removed - final String indexName = indexService.index().name(); - final String indexUUID = indexService.indexUUID(); - if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { - indexService.mapperService().merge(type, new CompressedString(update.toBytes()), true); - mappingUpdatedAction.updateMappingOnMaster(indexName, indexUUID, type, update, null); - } else { - mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, indexUUID, type, update); - indexService.mapperService().merge(type, new CompressedString(update.toBytes()), true); - } - } - @Override protected Tuple shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable { final IndexRequest request = shardRequest.request; @@ -201,27 +173,39 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi IndexShard indexShard = indexService.shardSafe(shardRequest.shardId.id()); SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.PRIMARY, request.source()).type(request.type()).id(request.id()) .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl()); - long version; - boolean created; + final Engine.IndexingOperation operation; if (request.opType() == IndexRequest.OpType.INDEX) { - Engine.Index index = indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates()); - if (index.parsedDoc().dynamicMappingsUpdate() != null) { - applyMappingUpdate(indexService, request.type(), index.parsedDoc().dynamicMappingsUpdate()); - } - indexShard.index(index); - version = index.version(); - created = index.created(); + operation = indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates()); } else { - Engine.Create create = indexShard.prepareCreate(sourceToParse, + assert request.opType() == IndexRequest.OpType.CREATE : request.opType(); + operation = indexShard.prepareCreate(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.PRIMARY, request.canHaveDuplicates(), request.autoGeneratedId()); - if (create.parsedDoc().dynamicMappingsUpdate() != null) { - applyMappingUpdate(indexService, request.type(), create.parsedDoc().dynamicMappingsUpdate()); - } - indexShard.create(create); - version = create.version(); - created = true; } + + final boolean created; + Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + final String indexName = indexService.index().name(); + if (indexName.equals(RiverIndexName.Conf.indexName(settings))) { + // With rivers, we have a chicken and egg problem if indexing + // the _meta document triggers a mapping update. 
We would + // like to validate the mapping update first, but on the other + // hand putting the mapping would start the river, which expects + // to find a _meta document. + // So we have no choice but to index first and send mappings afterwards. + MapperService mapperService = indexService.mapperService(); + mapperService.merge(request.type(), new CompressedString(update.toBytes()), true); + created = operation.execute(indexShard); + mappingUpdatedAction.updateMappingOnMasterAsynchronously(indexName, request.type(), update); + } else { + mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update); + created = operation.execute(indexShard); + } + } else { + created = operation.execute(indexShard); + } + if (request.refresh()) { try { indexShard.refresh("refresh_flag_index"); @@ -231,6 +215,7 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi } // update the version on the request, so it will be used for the replicas + final long version = operation.version(); request.version(version); request.versionType(request.versionType().versionTypeForReplicationAndRecovery()); @@ -239,19 +224,24 @@ public class TransportIndexAction extends TransportShardReplicationOperationActi } @Override - protected void shardOperationOnReplica(ReplicaOperationRequest shardRequest) { - IndexShard indexShard = indicesService.indexServiceSafe(shardRequest.shardId.getIndex()).shardSafe(shardRequest.shardId.id()); - IndexRequest request = shardRequest.request; + protected void shardOperationOnReplica(ShardId shardId, IndexRequest request) { + IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); + IndexShard indexShard = indexService.shardSafe(shardId.id()); SourceToParse sourceToParse = SourceToParse.source(SourceToParse.Origin.REPLICA, request.source()).type(request.type()).id(request.id()) .routing(request.routing()).parent(request.parent()).timestamp(request.timestamp()).ttl(request.ttl()); + + final Engine.IndexingOperation operation; if (request.opType() == IndexRequest.OpType.INDEX) { - Engine.Index index = indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates()); - indexShard.index(index); + operation = indexShard.prepareIndex(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates()); } else { - Engine.Create create = indexShard.prepareCreate(sourceToParse, - request.version(), request.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates(), request.autoGeneratedId()); - indexShard.create(create); + assert request.opType() == IndexRequest.OpType.CREATE : request.opType(); + operation = indexShard.prepareCreate(sourceToParse, request.version(), request.versionType(), Engine.Operation.Origin.REPLICA, request.canHaveDuplicates(), request.autoGeneratedId()); } + Mapping update = operation.parsedDoc().dynamicMappingsUpdate(); + if (update != null) { + throw new RetryOnReplicaException(shardId, "Mappings are not available on the replica yet, triggered update: " + update); + } + operation.execute(indexShard); if (request.refresh()) { try { indexShard.refresh("refresh_flag_index");
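A compact sketch of the ordering the TransportIndexAction changes above enforce, ignoring the river special case. The names below (DynamicMappingFlowSketch, MasterMappingUpdater, RetryOnReplica) are illustrative stand-ins, not the real Elasticsearch types: on the primary, a dynamic mapping update produced while parsing must reach the master before the operation executes; on a replica, such an update means the new mappings have not been published yet, so the shard request fails and is retried.

import java.util.concurrent.Callable;

// Hypothetical condensed model of the primary/replica handling above: a
// dynamic mapping update discovered while parsing a document is pushed to the
// master synchronously before the primary executes the operation, while a
// replica that produces such an update must wait for the mappings and retry.
final class DynamicMappingFlowSketch {

    interface MasterMappingUpdater {
        void updateMappingSynchronously(String index, String type, String mappingUpdate);
    }

    static class RetryOnReplica extends RuntimeException {
        RetryOnReplica(String message) {
            super(message);
        }
    }

    static <T> T executeOnPrimary(String index, String type, String mappingUpdate,
                                  MasterMappingUpdater master, Callable<T> operation) throws Exception {
        if (mappingUpdate != null) {
            // publish the new fields first, so every copy of the shard can
            // parse the document by the time it is replicated
            master.updateMappingSynchronously(index, type, mappingUpdate);
        }
        return operation.call();
    }

    static <T> T executeOnReplica(String mappingUpdate, Callable<T> operation) throws Exception {
        if (mappingUpdate != null) {
            // mappings travel primary -> master -> cluster state -> replica;
            // if they are not here yet, fail fast and let the request retry
            throw new RetryOnReplica("mappings not available on the replica yet");
        }
        return operation.call();
    }
}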
diff --git a/src/main/java/org/elasticsearch/action/indexedscripts/delete/TransportDeleteIndexedScriptAction.java b/src/main/java/org/elasticsearch/action/indexedscripts/delete/TransportDeleteIndexedScriptAction.java index f8c14d6d97a..fc205916e43 100644 --- a/src/main/java/org/elasticsearch/action/indexedscripts/delete/TransportDeleteIndexedScriptAction.java +++ b/src/main/java/org/elasticsearch/action/indexedscripts/delete/TransportDeleteIndexedScriptAction.java @@ -40,15 +40,10 @@ public class TransportDeleteIndexedScriptAction extends HandledTransportAction listener) { scriptService.deleteScriptFromIndex(request, new DelegatingActionListener(listener) { diff --git a/src/main/java/org/elasticsearch/action/indexedscripts/get/TransportGetIndexedScriptAction.java b/src/main/java/org/elasticsearch/action/indexedscripts/get/TransportGetIndexedScriptAction.java index 90a24968d94..1777258af59 100644 --- a/src/main/java/org/elasticsearch/action/indexedscripts/get/TransportGetIndexedScriptAction.java +++ b/src/main/java/org/elasticsearch/action/indexedscripts/get/TransportGetIndexedScriptAction.java @@ -39,15 +39,10 @@ public class TransportGetIndexedScriptAction extends HandledTransportAction listener){ // forward the handling to the script service we are running on a network thread here... diff --git a/src/main/java/org/elasticsearch/action/indexedscripts/put/TransportPutIndexedScriptAction.java b/src/main/java/org/elasticsearch/action/indexedscripts/put/TransportPutIndexedScriptAction.java index 3fcb60992f5..44c6e3679eb 100644 --- a/src/main/java/org/elasticsearch/action/indexedscripts/put/TransportPutIndexedScriptAction.java +++ b/src/main/java/org/elasticsearch/action/indexedscripts/put/TransportPutIndexedScriptAction.java @@ -40,15 +40,10 @@ public class TransportPutIndexedScriptAction extends HandledTransportAction listener) { scriptService.putScriptToIndex(request, new DelegatingActionListener(listener) { diff --git a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java index 202383dbf3c..b0d6bbb6c41 100644 --- a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java +++ b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequest.java @@ -21,7 +21,6 @@ package org.elasticsearch.action.mlt; import com.google.common.collect.Lists; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.*; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchType; @@ -475,7 +474,7 @@ public class MoreLikeThisRequest extends ActionRequest impl /** * The search type of the mlt search query. 
*/ - public MoreLikeThisRequest searchType(String searchType) throws ElasticsearchIllegalArgumentException { + public MoreLikeThisRequest searchType(String searchType) { return searchType(SearchType.fromString(searchType)); } diff --git a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java index 5b10269f3c2..ad8f7e43da0 100644 --- a/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/mlt/MoreLikeThisRequestBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.mlt; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.search.SearchResponse; @@ -213,7 +212,7 @@ public class MoreLikeThisRequestBuilder extends ActionRequestBuilder { private final TransportSearchAction searchAction; - private final TransportGetAction getAction; - private final IndicesService indicesService; - private final ClusterService clusterService; - private final TransportService transportService; @Inject public TransportMoreLikeThisAction(Settings settings, ThreadPool threadPool, TransportSearchAction searchAction, TransportGetAction getAction, ClusterService clusterService, IndicesService indicesService, TransportService transportService, ActionFilters actionFilters) { - super(settings, MoreLikeThisAction.NAME, threadPool, transportService, actionFilters); + super(settings, MoreLikeThisAction.NAME, threadPool, transportService, actionFilters, MoreLikeThisRequest.class); this.searchAction = searchAction; this.getAction = getAction; this.indicesService = indicesService; @@ -89,11 +84,6 @@ public class TransportMoreLikeThisAction extends HandledTransportAction listener) { // update to actual index name @@ -312,7 +302,7 @@ public class TransportMoreLikeThisAction extends HandledTransportAction private void parsePercolateAction(XContentParser parser, PercolateRequest percolateRequest, boolean allowExplicitIndex) throws IOException { String globalIndex = indices != null && indices.length > 0 ? 
indices[0] : null; - Map header = parseToMap(parser); - - IndicesOptions defaultOptions = indicesOptions; - boolean ignoreUnavailable = defaultOptions.ignoreUnavailable(); - boolean allowNoIndices = defaultOptions.allowNoIndices(); - boolean expandWildcardsOpen = defaultOptions.expandWildcardsOpen(); - boolean expandWildcardsClosed = defaultOptions.expandWildcardsClosed(); + Map header = parser.map(); if (header.containsKey("id")) { GetRequest getRequest = new GetRequest(globalIndex); @@ -189,52 +181,27 @@ public class MultiPercolateRequest extends ActionRequest for (Map.Entry entry : header.entrySet()) { Object value = entry.getValue(); if ("id".equals(entry.getKey())) { - getRequest.id((String) value); + getRequest.id(nodeStringValue(value, null)); header.put("id", entry.getValue()); } else if ("index".equals(entry.getKey()) || "indices".equals(entry.getKey())) { if (!allowExplicitIndex) { - throw new ElasticsearchIllegalArgumentException("explicit index in multi percolate is not allowed"); + throw new IllegalArgumentException("explicit index in multi percolate is not allowed"); } - getRequest.index((String) value); + getRequest.index(nodeStringValue(value, null)); } else if ("type".equals(entry.getKey())) { - getRequest.type((String) value); + getRequest.type(nodeStringValue(value, null)); } else if ("preference".equals(entry.getKey())) { - getRequest.preference((String) value); + getRequest.preference(nodeStringValue(value, null)); } else if ("routing".equals(entry.getKey())) { - getRequest.routing((String) value); + getRequest.routing(nodeStringValue(value, null)); } else if ("percolate_index".equals(entry.getKey()) || "percolate_indices".equals(entry.getKey()) || "percolateIndex".equals(entry.getKey()) || "percolateIndices".equals(entry.getKey())) { - if (value instanceof String[]) { - percolateRequest.indices((String[]) value); - } else { - percolateRequest.indices(Strings.splitStringByCommaToArray((String) value)); - } + percolateRequest.indices(nodeStringArrayValue(value)); } else if ("percolate_type".equals(entry.getKey()) || "percolateType".equals(entry.getKey())) { - percolateRequest.documentType((String) value); + percolateRequest.documentType(nodeStringValue(value, null)); } else if ("percolate_preference".equals(entry.getKey()) || "percolatePreference".equals(entry.getKey())) { - percolateRequest.preference((String) value); + percolateRequest.preference(nodeStringValue(value, null)); } else if ("percolate_routing".equals(entry.getKey()) || "percolateRouting".equals(entry.getKey())) { - percolateRequest.routing((String) value); - } else if ("ignore_unavailable".equals(entry.getKey()) || "ignoreUnavailable".equals(entry.getKey())) { - ignoreUnavailable = Boolean.valueOf((String) value); - } else if ("allow_no_indices".equals(entry.getKey()) || "allowNoIndices".equals(entry.getKey())) { - allowNoIndices = Boolean.valueOf((String) value); - } else if ("expand_wildcards".equals(entry.getKey()) || "expandWildcards".equals(entry.getKey())) { - String[] wildcards; - if (value instanceof String[]) { - wildcards = (String[]) value; - } else { - wildcards = Strings.splitStringByCommaToArray((String) value); - } - - for (String wildcard : wildcards) { - if ("open".equals(wildcard)) { - expandWildcardsOpen = true; - } else if ("closed".equals(wildcard)) { - expandWildcardsClosed = true; - } else { - throw new ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]"); - } - } + percolateRequest.routing(nodeStringValue(value, null)); } } @@ -256,70 +223,19 
@@ public class MultiPercolateRequest extends ActionRequest Object value = entry.getValue(); if ("index".equals(entry.getKey()) || "indices".equals(entry.getKey())) { if (!allowExplicitIndex) { - throw new ElasticsearchIllegalArgumentException("explicit index in multi percolate is not allowed"); - } - if (value instanceof String[]) { - percolateRequest.indices((String[]) value); - } else { - percolateRequest.indices(Strings.splitStringByCommaToArray((String) value)); + throw new IllegalArgumentException("explicit index in multi percolate is not allowed"); } + percolateRequest.indices(nodeStringArrayValue(value)); } else if ("type".equals(entry.getKey())) { - percolateRequest.documentType((String) value); + percolateRequest.documentType(nodeStringValue(value, null)); } else if ("preference".equals(entry.getKey())) { - percolateRequest.preference((String) value); + percolateRequest.preference(nodeStringValue(value, null)); } else if ("routing".equals(entry.getKey())) { - percolateRequest.routing((String) value); - } else if ("ignore_unavailable".equals(entry.getKey()) || "ignoreUnavailable".equals(entry.getKey())) { - ignoreUnavailable = Boolean.valueOf((String) value); - } else if ("allow_no_indices".equals(entry.getKey()) || "allowNoIndices".equals(entry.getKey())) { - allowNoIndices = Boolean.valueOf((String) value); - } else if ("expand_wildcards".equals(entry.getKey()) || "expandWildcards".equals(entry.getKey())) { - String[] wildcards; - if (value instanceof String[]) { - wildcards = (String[]) value; - } else { - wildcards = Strings.splitStringByCommaToArray((String) value); - } - - for (String wildcard : wildcards) { - if ("open".equals(wildcard)) { - expandWildcardsOpen = true; - } else if ("closed".equals(wildcard)) { - expandWildcardsClosed = true; - } else { - throw new ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]"); - } - } + percolateRequest.routing(nodeStringValue(value, null)); } } } - percolateRequest.indicesOptions(IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed, defaultOptions)); - } - - private Map parseToMap(XContentParser parser) throws IOException { - Map header = new HashMap<>(); - - String currentFieldName = null; - XContentParser.Token token; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - header.put(currentFieldName, parser.text()); - } else if (token == XContentParser.Token.START_ARRAY) { - header.put(currentFieldName, parseArray(parser)); - } - } - return header; - } - - private String[] parseArray(XContentParser parser) throws IOException { - final List list = new ArrayList<>(); - assert parser.currentToken() == XContentParser.Token.START_ARRAY; - while (parser.nextToken() != XContentParser.Token.END_ARRAY) { - list.add(parser.text()); - } - return list.toArray(new String[list.size()]); + percolateRequest.indicesOptions(IndicesOptions.fromMap(header, indicesOptions)); } private int findNextMarker(byte marker, int from, BytesReference data, int length) { diff --git a/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java b/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java index e1309a5c095..732e08ac36b 100644 --- a/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java +++ 
b/src/main/java/org/elasticsearch/action/percolate/PercolateRequestBuilder.java @@ -28,7 +28,9 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.FilterBuilder; import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; import org.elasticsearch.search.highlight.HighlightBuilder; import org.elasticsearch.search.sort.SortBuilder; @@ -162,9 +164,9 @@ public class PercolateRequestBuilder extends BroadcastOperationRequestBuilder reducers; PercolateShardResponse() { hls = new ArrayList<>(); @@ -69,6 +75,7 @@ public class PercolateShardResponse extends BroadcastShardOperationResponse { if (result.aggregations() != null) { this.aggregations = (InternalAggregations) result.aggregations(); } + this.reducers = result.reducers(); } } @@ -112,6 +119,10 @@ public class PercolateShardResponse extends BroadcastShardOperationResponse { return aggregations; } + public List reducers() { + return reducers; + } + public byte percolatorTypeId() { return percolatorTypeId; } @@ -144,6 +155,16 @@ public class PercolateShardResponse extends BroadcastShardOperationResponse { hls.add(fields); } aggregations = InternalAggregations.readOptionalAggregations(in); + if (in.readBoolean()) { + int reducersSize = in.readVInt(); + List reducers = new ArrayList<>(reducersSize); + for (int i = 0; i < reducersSize; i++) { + BytesReference type = in.readBytesReference(); + Reducer reducer = ReducerStreams.stream(type).readResult(in); + reducers.add((SiblingReducer) reducer); + } + this.reducers = reducers; + } } @Override @@ -169,5 +190,15 @@ public class PercolateShardResponse extends BroadcastShardOperationResponse { } } out.writeOptionalStreamable(aggregations); + if (reducers == null) { + out.writeBoolean(false); + } else { + out.writeBoolean(true); + out.writeVInt(reducers.size()); + for (Reducer reducer : reducers) { + out.writeBytesReference(reducer.type().stream()); + reducer.writeTo(out); + } + } } } diff --git a/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java b/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java index f09e630f459..68fc57b2a17 100644 --- a/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java +++ b/src/main/java/org/elasticsearch/action/percolate/PercolateSourceBuilder.java @@ -29,6 +29,7 @@ import org.elasticsearch.index.query.FilterBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; import org.elasticsearch.search.aggregations.AggregationBuilder; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; import org.elasticsearch.search.highlight.HighlightBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; import org.elasticsearch.search.sort.SortBuilder; @@ -50,7 +51,7 @@ public class PercolateSourceBuilder implements ToXContent { private List sorts; private Boolean trackScores; private HighlightBuilder highlightBuilder; - private List aggregations; + private List aggregations; /** * Sets the document to run the percolate queries against. @@ -130,7 +131,7 @@ public class PercolateSourceBuilder implements ToXContent { /** * Add an aggregation definition. 
*/ - public PercolateSourceBuilder addAggregation(AggregationBuilder aggregationBuilder) { + public PercolateSourceBuilder addAggregation(AbstractAggregationBuilder aggregationBuilder) { if (aggregations == null) { aggregations = Lists.newArrayList(); } diff --git a/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java b/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java index 3992221b0f9..50476176c47 100644 --- a/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java +++ b/src/main/java/org/elasticsearch/action/percolate/TransportMultiPercolateAction.java @@ -60,18 +60,13 @@ public class TransportMultiPercolateAction extends HandledTransportAction listener) { final ClusterState clusterState = clusterService.state(); diff --git a/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java b/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java index 7733ccf5069..e08cce30d43 100644 --- a/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java +++ b/src/main/java/org/elasticsearch/action/percolate/TransportPercolateAction.java @@ -62,7 +62,8 @@ public class TransportPercolateAction extends TransportBroadcastOperationAction< public TransportPercolateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, PercolatorService percolatorService, TransportGetAction getAction, ActionFilters actionFilters) { - super(settings, PercolateAction.NAME, threadPool, clusterService, transportService, actionFilters); + super(settings, PercolateAction.NAME, threadPool, clusterService, transportService, actionFilters, + PercolateRequest.class, PercolateShardRequest.class, ThreadPool.Names.PERCOLATE); this.percolatorService = percolatorService; this.getAction = getAction; } @@ -95,16 +96,6 @@ public class TransportPercolateAction extends TransportBroadcastOperationAction< } } - @Override - protected String executor() { - return ThreadPool.Names.PERCOLATE; - } - - @Override - protected PercolateRequest newRequestInstance() { - return new PercolateRequest(); - } - @Override protected ClusterBlockException checkGlobalBlock(ClusterState state, PercolateRequest request) { return state.blocks().globalBlockedException(ClusterBlockLevel.READ); @@ -165,11 +156,6 @@ public class TransportPercolateAction extends TransportBroadcastOperationAction< } } - @Override - protected PercolateShardRequest newShardRequest() { - return new PercolateShardRequest(); - } - @Override protected PercolateShardRequest newShardRequest(int numShards, ShardRouting shard, PercolateRequest request) { return new PercolateShardRequest(shard.shardId(), numShards, request); @@ -187,7 +173,7 @@ public class TransportPercolateAction extends TransportBroadcastOperationAction< } @Override - protected PercolateShardResponse shardOperation(PercolateShardRequest request) throws ElasticsearchException { + protected PercolateShardResponse shardOperation(PercolateShardRequest request) { try { return percolatorService.percolate(request); } catch (Throwable e) { diff --git a/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java b/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java index 6795a4ed549..ce38859174f 100644 --- a/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java +++ 
b/src/main/java/org/elasticsearch/action/percolate/TransportShardMultiPercolateAction.java @@ -57,7 +57,8 @@ public class TransportShardMultiPercolateAction extends TransportShardSingleOper @Inject public TransportShardMultiPercolateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, PercolatorService percolatorService, ActionFilters actionFilters) { - super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters); + super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, + Request.class, ThreadPool.Names.PERCOLATE); this.percolatorService = percolatorService; } @@ -66,16 +67,6 @@ public class TransportShardMultiPercolateAction extends TransportShardSingleOper return true; } - @Override - protected String executor() { - return ThreadPool.Names.PERCOLATE; - } - - @Override - protected Request newRequest() { - return new Request(); - } - @Override protected Response newResponse() { return new Response(); @@ -87,14 +78,14 @@ public class TransportShardMultiPercolateAction extends TransportShardSingleOper } @Override - protected ShardIterator shards(ClusterState state, InternalRequest request) throws ElasticsearchException { + protected ShardIterator shards(ClusterState state, InternalRequest request) { return clusterService.operationRouting().getShards( state, request.concreteIndex(), request.request().shardId(), request.request().preference ); } @Override - protected Response shardOperation(Request request, ShardId shardId) throws ElasticsearchException { + protected Response shardOperation(Request request, ShardId shardId) { // TODO: Look into combining the shard req's docs into one in memory index. Response response = new Response(); response.items = new ArrayList<>(request.items.size()); diff --git a/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java b/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java index ba62cd9f65d..54f8c861bea 100644 --- a/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java +++ b/src/main/java/org/elasticsearch/action/search/MultiSearchRequest.java @@ -20,15 +20,13 @@ package org.elasticsearch.action.search; import com.google.common.collect.Lists; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchParseException; + import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -40,8 +38,12 @@ import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.ArrayList; import java.util.List; +import java.util.Map; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringArrayValue; +import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue; /** * A multi search API request. 
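The hunk that follows swaps MultiSearchRequest's hand-rolled token loop for a map-based parse: each msearch header object is read into a Map via parser.map() and interpreted key by key, with the indices options recovered in one place by IndicesOptions.fromMap. A rough standalone sketch of that interpretation; MSearchHeaderSketch, Header, and splitByComma are hypothetical simplifications standing in for the real request and the XContentMapValues helpers.

import java.util.Map;

// Illustrative stand-in for the map-based msearch header parsing introduced below.
final class MSearchHeaderSketch {

    static class Header {
        String[] indices;
        String[] types;
        String searchType;
        String preference;
        String routing;
    }

    static Header interpret(Map<String, Object> source, boolean allowExplicitIndex) {
        Header header = new Header();
        for (Map.Entry<String, Object> entry : source.entrySet()) {
            Object value = entry.getValue();
            String key = entry.getKey();
            if ("index".equals(key) || "indices".equals(key)) {
                if (!allowExplicitIndex) {
                    throw new IllegalArgumentException("explicit index in multi search is not allowed");
                }
                header.indices = splitByComma(value);
            } else if ("type".equals(key) || "types".equals(key)) {
                header.types = splitByComma(value);
            } else if ("search_type".equals(key) || "searchType".equals(key)) {
                header.searchType = (String) value;
            } else if ("preference".equals(key)) {
                header.preference = (String) value;
            } else if ("routing".equals(key)) {
                header.routing = (String) value;
            }
            // ignore_unavailable, allow_no_indices and expand_wildcards stay in the
            // map and are handled in one place by IndicesOptions.fromMap
        }
        return header;
    }

    // crude equivalent of XContentMapValues.nodeStringArrayValue
    static String[] splitByComma(Object value) {
        return value instanceof String[] ? (String[]) value : ((String) value).split(",");
    }
}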
@@ -111,82 +113,35 @@ public class MultiSearchRequest extends ActionRequest implem searchRequest.searchType(searchType); IndicesOptions defaultOptions = IndicesOptions.strictExpandOpenAndForbidClosed(); - boolean ignoreUnavailable = defaultOptions.ignoreUnavailable(); - boolean allowNoIndices = defaultOptions.allowNoIndices(); - boolean expandWildcardsOpen = defaultOptions.expandWildcardsOpen(); - boolean expandWildcardsClosed = defaultOptions.expandWildcardsClosed(); + // now parse the action if (nextMarker - from > 0) { try (XContentParser parser = xContent.createParser(data.slice(from, nextMarker - from))) { - // Move to START_OBJECT, if token is null, its an empty data - XContentParser.Token token = parser.nextToken(); - if (token != null) { - assert token == XContentParser.Token.START_OBJECT; - String currentFieldName = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token.isValue()) { - if ("index".equals(currentFieldName) || "indices".equals(currentFieldName)) { - if (!allowExplicitIndex) { - throw new ElasticsearchIllegalArgumentException("explicit index in multi search is not allowed"); - } - searchRequest.indices(Strings.splitStringByCommaToArray(parser.text())); - } else if ("type".equals(currentFieldName) || "types".equals(currentFieldName)) { - searchRequest.types(Strings.splitStringByCommaToArray(parser.text())); - } else if ("search_type".equals(currentFieldName) || "searchType".equals(currentFieldName)) { - searchRequest.searchType(parser.text()); - } else if ("query_cache".equals(currentFieldName) || "queryCache".equals(currentFieldName)) { - searchRequest.queryCache(parser.booleanValue()); - } else if ("preference".equals(currentFieldName)) { - searchRequest.preference(parser.text()); - } else if ("routing".equals(currentFieldName)) { - searchRequest.routing(parser.text()); - } else if ("ignore_unavailable".equals(currentFieldName) || "ignoreUnavailable".equals(currentFieldName)) { - ignoreUnavailable = parser.booleanValue(); - } else if ("allow_no_indices".equals(currentFieldName) || "allowNoIndices".equals(currentFieldName)) { - allowNoIndices = parser.booleanValue(); - } else if ("expand_wildcards".equals(currentFieldName) || "expandWildcards".equals(currentFieldName)) { - String[] wildcards = Strings.splitStringByCommaToArray(parser.text()); - for (String wildcard : wildcards) { - if ("open".equals(wildcard)) { - expandWildcardsOpen = true; - } else if ("closed".equals(wildcard)) { - expandWildcardsClosed = true; - } else { - throw new ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]"); - } - } - } - } else if (token == XContentParser.Token.START_ARRAY) { - if ("index".equals(currentFieldName) || "indices".equals(currentFieldName)) { - if (!allowExplicitIndex) { - throw new ElasticsearchIllegalArgumentException("explicit index in multi search is not allowed"); - } - searchRequest.indices(parseArray(parser)); - } else if ("type".equals(currentFieldName) || "types".equals(currentFieldName)) { - searchRequest.types(parseArray(parser)); - } else if ("expand_wildcards".equals(currentFieldName) || "expandWildcards".equals(currentFieldName)) { - String[] wildcards = parseArray(parser); - for (String wildcard : wildcards) { - if ("open".equals(wildcard)) { - expandWildcardsOpen = true; - } else if ("closed".equals(wildcard)) { - expandWildcardsClosed = true; - } else { - throw new 
ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]"); - } - } - } else { - throw new ElasticsearchParseException(currentFieldName + " doesn't support arrays"); - } + Map source = parser.map(); + for (Map.Entry entry : source.entrySet()) { + Object value = entry.getValue(); + if ("index".equals(entry.getKey()) || "indices".equals(entry.getKey())) { + if (!allowExplicitIndex) { + throw new IllegalArgumentException("explicit index in multi search is not allowed"); } + searchRequest.indices(nodeStringArrayValue(value)); + } else if ("type".equals(entry.getKey()) || "types".equals(entry.getKey())) { + searchRequest.types(nodeStringArrayValue(value)); + } else if ("search_type".equals(entry.getKey()) || "searchType".equals(entry.getKey())) { + searchRequest.searchType(nodeStringValue(value, null)); + } else if ("query_cache".equals(entry.getKey()) || "queryCache".equals(entry.getKey())) { + searchRequest.queryCache(nodeBooleanValue(value)); + } else if ("preference".equals(entry.getKey())) { + searchRequest.preference(nodeStringValue(value, null)); + } else if ("routing".equals(entry.getKey())) { + searchRequest.routing(nodeStringValue(value, null)); } } + defaultOptions = IndicesOptions.fromMap(source, defaultOptions); } } - searchRequest.indicesOptions(IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices, expandWildcardsOpen, expandWildcardsClosed, defaultOptions)); + searchRequest.indicesOptions(defaultOptions); // move pointers from = nextMarker + 1;
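The SearchPhaseExecutionException rework that follows reports shard failures grouped by default: both innerToXContent and guessRootCauses keep one representative failure per (cause type, index, message) combination. A small self-contained model of that de-duplication, with Failure as a stand-in for ShardSearchFailure:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Simplified model of the GroupBy-based de-duplication below: one
// representative failure is kept per (cause class, index, message) key.
final class ShardFailureGroupingSketch {

    static class Failure {
        final String index;
        final Throwable cause;

        Failure(String index, Throwable cause) {
            this.index = index;
            this.cause = cause;
        }
    }

    static List<Failure> groupBy(List<Failure> failures) {
        Set<String> seen = new HashSet<>();
        List<Failure> unique = new ArrayList<>();
        for (Failure failure : failures) {
            // mirrors GroupBy.equals(): cause type, index and message must all match
            String key = failure.cause.getClass().getName()
                    + "|" + failure.index
                    + "|" + failure.cause.getMessage();
            if (seen.add(key)) { // add() returns false when the key was already seen
                unique.add(failure);
            }
        }
        return unique;
    }
}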
diff --git a/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java b/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java index 9e3de989bbf..d0202ba219d 100644 --- a/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java +++ b/src/main/java/org/elasticsearch/action/search/SearchPhaseExecutionException.java @@ -20,25 +20,30 @@ package org.elasticsearch.action.search; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexException; import org.elasticsearch.rest.RestStatus; +import java.io.IOException; +import java.util.*; + /** * */ public class SearchPhaseExecutionException extends ElasticsearchException { - private final String phaseName; private ShardSearchFailure[] shardFailures; public SearchPhaseExecutionException(String phaseName, String msg, ShardSearchFailure[] shardFailures) { - super(buildMessage(phaseName, msg, shardFailures)); + super(msg); this.phaseName = phaseName; this.shardFailures = shardFailures; } public SearchPhaseExecutionException(String phaseName, String msg, Throwable cause, ShardSearchFailure[] shardFailures) { - super(buildMessage(phaseName, msg, shardFailures), cause); + super(msg, cause); this.phaseName = phaseName; this.shardFailures = shardFailures; } @@ -60,10 +65,6 @@ public class SearchPhaseExecutionException extends ElasticsearchException { return status; } - public String phaseName() { - return phaseName; - } - public ShardSearchFailure[] shardFailures() { return shardFailures; } @@ -83,4 +84,90 @@ public class SearchPhaseExecutionException extends ElasticsearchException { } return sb.toString(); } + + @Override + protected void innerToXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("phase", phaseName); + final boolean group = params.paramAsBoolean("group_shard_failures", true); // we group by default + builder.field("grouped", group); // notify that it's grouped + builder.field("failed_shards"); + builder.startArray(); + ShardSearchFailure[] failures = group ? groupBy(shardFailures) : shardFailures; + for (ShardSearchFailure failure : failures) { + builder.startObject(); + failure.toXContent(builder, params); + builder.endObject(); + } + builder.endArray(); + super.innerToXContent(builder, params); + + } + + private ShardSearchFailure[] groupBy(ShardSearchFailure[] failures) { + List uniqueFailures = new ArrayList<>(); + Set reasons = new HashSet<>(); + for (ShardSearchFailure failure : failures) { + GroupBy reason = new GroupBy(failure.getCause()); + if (reasons.contains(reason) == false) { + reasons.add(reason); + uniqueFailures.add(failure); + } + } + return uniqueFailures.toArray(new ShardSearchFailure[0]); + + } + + @Override + public ElasticsearchException[] guessRootCauses() { + ShardSearchFailure[] failures = groupBy(shardFailures); + List rootCauses = new ArrayList<>(failures.length); + for (ShardSearchFailure failure : failures) { + ElasticsearchException[] guessRootCauses = ElasticsearchException.guessRootCauses(failure.getCause()); + rootCauses.addAll(Arrays.asList(guessRootCauses)); + } + return rootCauses.toArray(new ElasticsearchException[0]); + } + + @Override + public String toString() { + return buildMessage(phaseName, getMessage(), shardFailures); + } + + static class GroupBy { + final String reason; + final Index index; + final Class causeType; + + public GroupBy(Throwable t) { + if (t instanceof IndexException) { + index = ((IndexException) t).index(); + } else { + index = null; + } + reason = t.getMessage(); + causeType = t.getClass(); + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + GroupBy groupBy = (GroupBy) o; + + if (!causeType.equals(groupBy.causeType)) return false; + if (index != null ? !index.equals(groupBy.index) : groupBy.index != null) return false; + if (reason != null ? !reason.equals(groupBy.reason) : groupBy.reason != null) return false; + + return true; + } + + @Override + public int hashCode() { + int result = reason != null ? reason.hashCode() : 0; + result = 31 * result + (index != null ? index.hashCode() : 0); + result = 31 * result + causeType.hashCode(); + return result; + } + } } diff --git a/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 1c16f3a8b35..8e1da31affa 100644 --- a/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.search; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -152,11 +151,11 @@ public class SearchRequest extends ActionRequest implements Indic @Override public SearchRequest indices(String... 
indices) { if (indices == null) { - throw new ElasticsearchIllegalArgumentException("indices must not be null"); + throw new IllegalArgumentException("indices must not be null"); } else { for (int i = 0; i < indices.length; i++) { if (indices[i] == null) { - throw new ElasticsearchIllegalArgumentException("indices[" + i + "] must not be null"); + throw new IllegalArgumentException("indices[" + i + "] must not be null"); } } } @@ -241,7 +240,7 @@ public class SearchRequest extends ActionRequest implements Indic * one of "dfs_query_then_fetch"/"dfsQueryThenFetch", "dfs_query_and_fetch"/"dfsQueryAndFetch", * "query_then_fetch"/"queryThenFetch", and "query_and_fetch"/"queryAndFetch". */ - public SearchRequest searchType(String searchType) throws ElasticsearchIllegalArgumentException { + public SearchRequest searchType(String searchType) { return searchType(SearchType.fromString(searchType)); } diff --git a/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 59d6db804b0..349da4103f0 100644 --- a/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -19,7 +19,7 @@ package org.elasticsearch.action.search; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.support.IndicesOptions; @@ -28,11 +28,13 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.index.query.FilterBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.innerhits.InnerHitsBuilder; import org.elasticsearch.search.highlight.HighlightBuilder; @@ -84,7 +86,7 @@ public class SearchRequestBuilder extends ActionRequestBuilder { private final ClusterService clusterService; - private final TransportSearchAction searchAction; @Inject public TransportMultiSearchAction(Settings settings, ThreadPool threadPool, TransportService transportService, ClusterService clusterService, TransportSearchAction searchAction, ActionFilters actionFilters) { - super(settings, MultiSearchAction.NAME, threadPool, transportService, actionFilters); + super(settings, MultiSearchAction.NAME, threadPool, transportService, actionFilters, MultiSearchRequest.class); this.clusterService = clusterService; this.searchAction = searchAction; } @@ -82,9 +81,4 @@ public class TransportMultiSearchAction extends HandledTransportAction { private final TransportSearchScrollQueryThenFetchAction queryThenFetchAction; - private final TransportSearchScrollQueryAndFetchAction queryAndFetchAction; - private final TransportSearchScrollScanAction scanAction; @Inject @@ -54,7 +48,7 @@ public class TransportSearchScrollAction extends HandledTransportAction[] context = new Tuple[contextSize]; @@ -110,7 +108,7 @@ public abstract class 
TransportSearchHelper { String element = elements[index++]; int sep = element.indexOf(':'); if (sep == -1) { - throw new ElasticsearchIllegalArgumentException("Malformed scrollId [" + scrollId + "]"); + throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]"); } context[i] = new Tuple<>(element.substring(sep + 1), Long.parseLong(element.substring(0, sep))); } diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java index 189b035a918..e2cf4d87f53 100644 --- a/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java +++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java @@ -23,7 +23,6 @@ import com.carrotsearch.hppc.IntArrayList; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.search.ReduceSearchPhaseException; @@ -194,7 +193,7 @@ public abstract class TransportSearchTypeAction extends TransportAction expectedTotalOps) { - raiseEarlyFailure(new ElasticsearchIllegalStateException("unexpected higher total ops [" + xTotalOps + "] compared to expected [" + expectedTotalOps + "]")); + raiseEarlyFailure(new IllegalStateException("unexpected higher total ops [" + xTotalOps + "] compared to expected [" + expectedTotalOps + "]")); } } diff --git a/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java b/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java index 410925727a7..7d69af266b3 100644 --- a/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java +++ b/src/main/java/org/elasticsearch/action/suggest/TransportSuggestAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.suggest; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.DefaultShardOperationFailedException; @@ -62,32 +61,17 @@ import static com.google.common.collect.Lists.newArrayList; public class TransportSuggestAction extends TransportBroadcastOperationAction { private final IndicesService indicesService; - private final SuggestPhase suggestPhase; @Inject public TransportSuggestAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, IndicesService indicesService, SuggestPhase suggestPhase, ActionFilters actionFilters) { - super(settings, SuggestAction.NAME, threadPool, clusterService, transportService, actionFilters); + super(settings, SuggestAction.NAME, threadPool, clusterService, transportService, actionFilters, + SuggestRequest.class, ShardSuggestRequest.class, ThreadPool.Names.SUGGEST); this.indicesService = indicesService; this.suggestPhase = suggestPhase; } - @Override - protected String executor() { - return ThreadPool.Names.SUGGEST; - } - - @Override - protected SuggestRequest newRequestInstance() { - return new SuggestRequest(); - } - - @Override - protected ShardSuggestRequest newShardRequest() { - return new ShardSuggestRequest(); - } - @Override protected ShardSuggestRequest newShardRequest(int numShards, ShardRouting shard, SuggestRequest 
request) { return new ShardSuggestRequest(shard.shardId(), request); @@ -143,7 +127,7 @@ public class TransportSuggestAction extends TransportBroadcastOperationAction 0) { parser = XContentFactory.xContent(suggest).createParser(suggest); if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("suggest content missing"); + throw new IllegalArgumentException("suggest content missing"); } final SuggestionSearchContext context = suggestPhase.parseElement().parseInternal(parser, indexService.mapperService(), request.shardId().getIndex(), request.shardId().id()); final Suggest result = suggestPhase.execute(context, searcher.reader()); diff --git a/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java b/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java index 1aa0ead9217..e1da5ee41f5 100644 --- a/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java +++ b/src/main/java/org/elasticsearch/action/support/AdapterActionFuture.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.support; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; @@ -40,52 +39,56 @@ public abstract class AdapterActionFuture extends BaseFuture implements private Throwable rootFailure; @Override - public T actionGet() throws ElasticsearchException { + public T actionGet() { try { return get(); } catch (InterruptedException e) { - throw new ElasticsearchIllegalStateException("Future got interrupted", e); + throw new IllegalStateException("Future got interrupted", e); } catch (ExecutionException e) { throw rethrowExecutionException(e); } } @Override - public T actionGet(String timeout) throws ElasticsearchException { + public T actionGet(String timeout) { return actionGet(TimeValue.parseTimeValue(timeout, null)); } @Override - public T actionGet(long timeoutMillis) throws ElasticsearchException { + public T actionGet(long timeoutMillis) { return actionGet(timeoutMillis, TimeUnit.MILLISECONDS); } @Override - public T actionGet(TimeValue timeout) throws ElasticsearchException { + public T actionGet(TimeValue timeout) { return actionGet(timeout.millis(), TimeUnit.MILLISECONDS); } @Override - public T actionGet(long timeout, TimeUnit unit) throws ElasticsearchException { + public T actionGet(long timeout, TimeUnit unit) { try { return get(timeout, unit); } catch (TimeoutException e) { throw new ElasticsearchTimeoutException(e.getMessage()); } catch (InterruptedException e) { - throw new ElasticsearchIllegalStateException("Future got interrupted", e); + throw new IllegalStateException("Future got interrupted", e); } catch (ExecutionException e) { throw rethrowExecutionException(e); } } - static ElasticsearchException rethrowExecutionException(ExecutionException e) { + static RuntimeException rethrowExecutionException(ExecutionException e) { if (e.getCause() instanceof ElasticsearchException) { ElasticsearchException esEx = (ElasticsearchException) e.getCause(); Throwable root = esEx.unwrapCause(); if (root instanceof ElasticsearchException) { return (ElasticsearchException) root; + } else if (root instanceof RuntimeException) { + return (RuntimeException) root; } return new UncategorizedExecutionException("Failed execution", root); + } else if (e.getCause() instanceof RuntimeException) { + return (RuntimeException) 
e.getCause(); } else { return new UncategorizedExecutionException("Failed execution", e); } diff --git a/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java b/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java index 3a7e90096c1..3be7b7d2aee 100644 --- a/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java +++ b/src/main/java/org/elasticsearch/action/support/DefaultShardOperationFailedException.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.support; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -39,30 +40,25 @@ public class DefaultShardOperationFailedException implements ShardOperationFaile private int shardId; - private String reason; + private Throwable reason; private RestStatus status; private DefaultShardOperationFailedException() { - } public DefaultShardOperationFailedException(IndexShardException e) { this.index = e.shardId().index().name(); this.shardId = e.shardId().id(); - this.reason = detailedMessage(e); + this.reason = e; this.status = e.status(); } public DefaultShardOperationFailedException(String index, int shardId, Throwable t) { this.index = index; this.shardId = shardId; - this.reason = detailedMessage(t); - if (t != null && t instanceof ElasticsearchException) { - status = ((ElasticsearchException) t).status(); - } else { - status = RestStatus.INTERNAL_SERVER_ERROR; - } + this.reason = t; + status = ExceptionsHelper.status(t); } @Override @@ -77,7 +73,7 @@ public class DefaultShardOperationFailedException implements ShardOperationFaile @Override public String reason() { - return this.reason; + return detailedMessage(reason); } @Override @@ -97,7 +93,7 @@ public class DefaultShardOperationFailedException implements ShardOperationFaile index = in.readString(); } shardId = in.readVInt(); - reason = in.readString(); + reason = in.readThrowable(); status = RestStatus.readFrom(in); } @@ -110,12 +106,12 @@ public class DefaultShardOperationFailedException implements ShardOperationFaile out.writeString(index); } out.writeVInt(shardId); - out.writeString(reason); + out.writeThrowable(reason); RestStatus.writeTo(out, status); } @Override public String toString() { - return "[" + index + "][" + shardId + "] failed, reason [" + reason + "]"; + return "[" + index + "][" + shardId + "] failed, reason [" + reason() + "]"; } } diff --git a/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java b/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java index 0324114bf53..9722bef5990 100644 --- a/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java +++ b/src/main/java/org/elasticsearch/action/support/DestructiveOperations.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.support; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.node.settings.NodeSettingsService; @@ -55,15 +54,15 @@ public final class DestructiveOperations implements NodeSettingsService.Listener } if (aliasesOrIndices == null || aliasesOrIndices.length == 0) { - throw new ElasticsearchIllegalArgumentException("Wildcard expressions or all indices are not allowed"); + throw new 
IllegalArgumentException("Wildcard expressions or all indices are not allowed"); } else if (aliasesOrIndices.length == 1) { if (hasWildcardUsage(aliasesOrIndices[0])) { - throw new ElasticsearchIllegalArgumentException("Wildcard expressions or all indices are not allowed"); + throw new IllegalArgumentException("Wildcard expressions or all indices are not allowed"); } } else { for (String aliasesOrIndex : aliasesOrIndices) { if (hasWildcardUsage(aliasesOrIndex)) { - throw new ElasticsearchIllegalArgumentException("Wildcard expressions or all indices are not allowed"); + throw new IllegalArgumentException("Wildcard expressions or all indices are not allowed"); } } } diff --git a/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java b/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java index b69032f3cdf..f939893a98e 100644 --- a/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java +++ b/src/main/java/org/elasticsearch/action/support/HandledTransportAction.java @@ -23,8 +23,8 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BaseTransportRequestHandler; import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportRequestHandler; import org.elasticsearch.transport.TransportService; /** @@ -32,30 +32,12 @@ import org.elasticsearch.transport.TransportService; */ public abstract class HandledTransportAction extends TransportAction{ - /** - * Sub classes implement this call to get new instance of a Request object - * @return Request - */ - protected abstract Request newRequestInstance(); - - protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters){ + protected HandledTransportAction(Settings settings, String actionName, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters, Class request) { super(settings, actionName, threadPool, actionFilters); - transportService.registerHandler(actionName, new TransportHandler() { - @Override - public Request newInstance(){ - return newRequestInstance(); - } - }); + transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new TransportHandler()); } - abstract class TransportHandler extends BaseTransportRequestHandler{ - - /** - * Call to get an instance of type Request - * @return Request - */ - @Override - public abstract Request newInstance(); + class TransportHandler implements TransportRequestHandler { @Override public final void messageReceived(final Request request, final TransportChannel channel) throws Exception { @@ -82,12 +64,6 @@ public abstract class HandledTransportAction= VALUES.length) { - throw new ElasticsearchIllegalArgumentException("No valid missing index type id: " + id); + throw new IllegalArgumentException("No valid missing index type id: " + id); } return VALUES[id]; } @@ -136,20 +139,33 @@ public class IndicesOptions { } public static IndicesOptions fromRequest(RestRequest request, IndicesOptions defaultSettings) { - String sWildcards = request.param("expand_wildcards"); - String sIgnoreUnavailable = request.param("ignore_unavailable"); - String sAllowNoIndices = request.param("allow_no_indices"); - if (sWildcards == null && sIgnoreUnavailable == null && sAllowNoIndices == null) { + return 
fromParameters( + request.param("expand_wildcards"), + request.param("ignore_unavailable"), + request.param("allow_no_indices"), + defaultSettings); + } + + public static IndicesOptions fromMap(Map map, IndicesOptions defaultSettings) { + return fromParameters( + map.containsKey("expand_wildcards") ? map.get("expand_wildcards") : map.get("expandWildcards"), + map.containsKey("ignore_unavailable") ? map.get("ignore_unavailable") : map.get("ignoreUnavailable"), + map.containsKey("allow_no_indices") ? map.get("allow_no_indices") : map.get("allowNoIndices"), + defaultSettings); + } + + public static IndicesOptions fromParameters(Object wildcardsString, Object ignoreUnavailableString, Object allowNoIndicesString, IndicesOptions defaultSettings) { + if (wildcardsString == null && ignoreUnavailableString == null && allowNoIndicesString == null) { return defaultSettings; } boolean expandWildcardsOpen = false; boolean expandWildcardsClosed = false; - if (sWildcards == null) { + if (wildcardsString == null) { expandWildcardsOpen = defaultSettings.expandWildcardsOpen(); expandWildcardsClosed = defaultSettings.expandWildcardsClosed(); } else { - String[] wildcards = Strings.splitStringByCommaToArray(sWildcards); + String[] wildcards = nodeStringArrayValue(wildcardsString); for (String wildcard : wildcards) { if ("open".equals(wildcard)) { expandWildcardsOpen = true; @@ -162,15 +178,15 @@ public class IndicesOptions { expandWildcardsOpen = true; expandWildcardsClosed = true; } else { - throw new ElasticsearchIllegalArgumentException("No valid expand wildcard value [" + wildcard + "]"); + throw new IllegalArgumentException("No valid expand wildcard value [" + wildcard + "]"); } } } //note that allowAliasesToMultipleIndices is not exposed, always true (only for internal use) return fromOptions( - toBool(sIgnoreUnavailable, defaultSettings.ignoreUnavailable()), - toBool(sAllowNoIndices, defaultSettings.allowNoIndices()), + nodeBooleanValue(ignoreUnavailableString, defaultSettings.ignoreUnavailable()), + nodeBooleanValue(allowNoIndicesString, defaultSettings.allowNoIndices()), expandWildcardsOpen, expandWildcardsClosed, defaultSettings.allowAliasesToMultipleIndices(), @@ -245,10 +261,16 @@ public class IndicesOptions { return id; } - private static boolean toBool(String sValue, boolean defaultValue) { - if (sValue == null) { - return defaultValue; - } - return !(sValue.equals("false") || sValue.equals("0") || sValue.equals("off")); + @Override + public String toString() { + return "IndicesOptions[" + + "id=" + id + + ", ignore_unavailable=" + ignoreUnavailable() + + ", allow_no_indices=" + allowNoIndices() + + ", expand_wildcards_open=" + expandWildcardsOpen() + + ", expand_wildcards_closed=" + expandWildcardsClosed() + + ", allow_aliases_to_multiple_indices=" + allowAliasesToMultipleIndices() + + ", forbid_closed_indices=" + forbidClosedIndices() + + ']'; } } diff --git a/src/main/java/org/elasticsearch/action/support/QuerySourceBuilder.java b/src/main/java/org/elasticsearch/action/support/QuerySourceBuilder.java index e60ed169085..37497837ff8 100644 --- a/src/main/java/org/elasticsearch/action/support/QuerySourceBuilder.java +++ b/src/main/java/org/elasticsearch/action/support/QuerySourceBuilder.java @@ -19,6 +19,7 @@ package org.elasticsearch.action.support; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -74,4 +75,15 @@ public class
QuerySourceBuilder implements ToXContent { throw new SearchSourceBuilderException("Failed to build search source", e); } } + + @Override + public String toString() { + try { + XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint(); + toXContent(builder, ToXContent.EMPTY_PARAMS); + return builder.string(); + } catch (Exception e) { + return "{ \"error\" : \"" + ExceptionsHelper.detailedMessage(e) + "\"}"; + } + } } diff --git a/src/main/java/org/elasticsearch/action/support/TransportAction.java b/src/main/java/org/elasticsearch/action/support/TransportAction.java index 4637ba00c58..7d3f3564693 100644 --- a/src/main/java/org/elasticsearch/action/support/TransportAction.java +++ b/src/main/java/org/elasticsearch/action/support/TransportAction.java @@ -47,7 +47,7 @@ public abstract class TransportAction execute(Request request) throws ElasticsearchException { + public final ActionFuture execute(Request request) { PlainActionFuture future = newFuture(); // since we don't have a listener, and we release a possible lock with the future // there is no need to execute it under a listener thread diff --git a/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastOperationAction.java b/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastOperationAction.java index 09d7bd55447..b0bcad2e86b 100644 --- a/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/broadcast/TransportBroadcastOperationAction.java @@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.NoShardAvailableActionException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; @@ -53,17 +52,16 @@ public abstract class TransportBroadcastOperationAction request, Class shardRequest, String shardExecutor) { + super(settings, actionName, threadPool, transportService, actionFilters, request); this.clusterService = clusterService; this.transportService = transportService; this.threadPool = threadPool; this.transportShardAction = actionName + "[s]"; - this.executor = executor(); - transportService.registerHandler(transportShardAction, new ShardTransportHandler()); + transportService.registerRequestHandler(transportShardAction, shardRequest, shardExecutor, new ShardTransportHandler()); } @Override @@ -71,17 +69,13 @@ public abstract class TransportBroadcastOperationAction { - - @Override - public ShardRequest newInstance() { - return newShardRequest(); - } - - @Override - public String executor() { - return executor; - } + class ShardTransportHandler implements TransportRequestHandler { @Override public void messageReceived(final ShardRequest request, final TransportChannel channel) throws Exception { diff --git a/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java b/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java index 6a459e50d49..15e90c0784d 100644 --- a/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeOperationAction.java @@ -24,6 +24,7 @@ import 
org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterService; @@ -42,28 +43,23 @@ import org.elasticsearch.transport.*; /** * A base class for operations that needs to be performed on the master node. */ -public abstract class TransportMasterNodeOperationAction extends TransportAction { +public abstract class TransportMasterNodeOperationAction extends HandledTransportAction { protected final TransportService transportService; - protected final ClusterService clusterService; final String executor; - protected TransportMasterNodeOperationAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, actionName, threadPool, actionFilters); + protected TransportMasterNodeOperationAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, + Class request) { + super(settings, actionName, threadPool, transportService, actionFilters, request); this.transportService = transportService; this.clusterService = clusterService; - this.executor = executor(); - - transportService.registerHandler(actionName, new TransportHandler()); } protected abstract String executor(); - protected abstract Request newRequest(); - protected abstract Response newResponse(); protected abstract void masterOperation(Request request, ClusterState state, ActionListener listener) throws Exception; @@ -225,42 +221,4 @@ public abstract class TransportMasterNodeOperationAction { - - @Override - public Request newInstance() { - return newRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - - @Override - public void messageReceived(final Request request, final TransportChannel channel) throws Exception { - // we just send back a response, no need to fork a listener - request.listenerThreaded(false); - execute(request, new ActionListener() { - @Override - public void onResponse(Response response) { - try { - channel.sendResponse(response); - } catch (Throwable e) { - onFailure(e); - } - } - - @Override - public void onFailure(Throwable e) { - try { - channel.sendResponse(e); - } catch (Exception e1) { - logger.warn("Failed to send response", e1); - } - } - }); - } - } } diff --git a/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadOperationAction.java b/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadOperationAction.java index 7d7453f581d..383de7ceb53 100644 --- a/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeReadOperationAction.java @@ -36,8 +36,8 @@ public abstract class TransportMasterNodeReadOperationAction request) { + super(settings, actionName, transportService, clusterService, threadPool, actionFilters,request); this.forceLocal = settings.getAsBoolean(FORCE_LOCAL_SETTING, null); } diff --git a/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java 
b/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java index 00d3e566bc6..8ecff553bce 100644 --- a/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java +++ b/src/main/java/org/elasticsearch/action/support/master/info/TransportClusterInfoAction.java @@ -33,8 +33,8 @@ import org.elasticsearch.transport.TransportService; */ public abstract class TransportClusterInfoAction extends TransportMasterNodeReadOperationAction { - public TransportClusterInfoAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters) { - super(settings, actionName, transportService, clusterService, threadPool, actionFilters); + public TransportClusterInfoAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, ActionFilters actionFilters, Class request) { + super(settings, actionName, transportService, clusterService, threadPool, actionFilters, request); } @Override @@ -44,10 +44,10 @@ public abstract class TransportClusterInfoAction listener) throws ElasticsearchException { + protected final void masterOperation(final Request request, final ClusterState state, final ActionListener listener) { String[] concreteIndices = state.metaData().concreteIndices(request.indicesOptions(), request.indices()); doMasterOperation(request, concreteIndices, state, listener); } - protected abstract void doMasterOperation(Request request, String[] concreteIndices, ClusterState state, final ActionListener listener) throws ElasticsearchException; + protected abstract void doMasterOperation(Request request, String[] concreteIndices, ClusterState state, final ActionListener listener); } diff --git a/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java b/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java index 358f7d0860f..9331a5d4a65 100644 --- a/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/nodes/TransportNodesOperationAction.java @@ -25,7 +25,6 @@ import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.NoSuchNodeException; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; -import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; @@ -48,19 +47,18 @@ public abstract class TransportNodesOperationAction request, Class nodeRequest, String nodeExecutor) { + super(settings, actionName, threadPool, transportService, actionFilters, request); this.clusterName = clusterName; this.clusterService = clusterService; this.transportService = transportService; this.transportNodeAction = actionName + "[n]"; - this.executor = executor(); - transportService.registerHandler(transportNodeAction, new NodeTransportHandler()); + transportService.registerRequestHandler(transportNodeAction, nodeRequest, nodeExecutor, new NodeTransportHandler()); } @Override @@ -72,17 +70,13 @@ public abstract class TransportNodesOperationAction { - - @Override - public NodeRequest newInstance() { - return newNodeRequest(); - } + class NodeTransportHandler implements TransportRequestHandler { @Override public void 
messageReceived(final NodeRequest request, final TransportChannel channel) throws Exception { channel.sendResponse(nodeOperation(request)); } - - @Override - public String toString() { - return transportNodeAction; - } - - @Override - public String executor() { - return executor; - } } } diff --git a/src/main/java/org/elasticsearch/action/support/replication/IndexReplicationOperationRequest.java b/src/main/java/org/elasticsearch/action/support/replication/IndexReplicationOperationRequest.java deleted file mode 100644 index 008008b1e6c..00000000000 --- a/src/main/java/org/elasticsearch/action/support/replication/IndexReplicationOperationRequest.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.support.replication; - -import org.elasticsearch.action.*; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.unit.TimeValue; - -import java.io.IOException; - -/** - * Request used within {@link org.elasticsearch.action.support.replication.TransportIndexReplicationOperationAction}. - * Since the corresponding action is internal that gets always executed locally, this request never gets sent over the transport. - * The specified index is expected to be a concrete index. Relies on input validation done by the caller actions. 
- */ -public abstract class IndexReplicationOperationRequest extends ActionRequest implements IndicesRequest { - - private final TimeValue timeout; - private final String index; - private final WriteConsistencyLevel consistencyLevel; - private final OriginalIndices originalIndices; - - protected IndexReplicationOperationRequest(String index, TimeValue timeout, WriteConsistencyLevel consistencyLevel, - String[] originalIndices, IndicesOptions originalIndicesOptions, ActionRequest request) { - super(request); - this.index = index; - this.timeout = timeout; - this.consistencyLevel = consistencyLevel; - this.originalIndices = new OriginalIndices(originalIndices, originalIndicesOptions); - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - public TimeValue timeout() { - return timeout; - } - - public String index() { - return this.index; - } - - @Override - public String[] indices() { - return originalIndices.indices(); - } - - @Override - public IndicesOptions indicesOptions() { - return originalIndices.indicesOptions(); - } - - public WriteConsistencyLevel consistencyLevel() { - return this.consistencyLevel; - } - - @Override - public final void readFrom(StreamInput in) throws IOException { - throw new UnsupportedOperationException("IndexReplicationOperationRequest is not supposed to be sent over the transport"); - } - - @Override - public final void writeTo(StreamOutput out) throws IOException { - throw new UnsupportedOperationException("IndexReplicationOperationRequest is not supposed to be sent over the transport"); - } -} diff --git a/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java b/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java deleted file mode 100644 index 5113628fa6f..00000000000 --- a/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequest.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.action.support.replication; - -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.WriteConsistencyLevel; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.unit.TimeValue; - -import java.io.IOException; - -/** - * - */ -public abstract class IndicesReplicationOperationRequest extends ActionRequest implements IndicesRequest.Replaceable { - - protected TimeValue timeout = ShardReplicationOperationRequest.DEFAULT_TIMEOUT; - protected String[] indices; - private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, false); - - protected WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT; - - public TimeValue timeout() { - return timeout; - } - - protected IndicesReplicationOperationRequest() { - } - - protected IndicesReplicationOperationRequest(ActionRequest actionRequest) { - super(actionRequest); - } - - /** - * A timeout to wait if the delete by query operation can't be performed immediately. Defaults to 1m. - */ - @SuppressWarnings("unchecked") - public final T timeout(TimeValue timeout) { - this.timeout = timeout; - return (T) this; - } - - /** - * A timeout to wait if the delete by query operation can't be performed immediately. Defaults to 1m. - */ - @SuppressWarnings("unchecked") - public T timeout(String timeout) { - this.timeout = TimeValue.parseTimeValue(timeout, null); - return (T) this; - } - - @Override - public String[] indices() { - return this.indices; - } - - @Override - public IndicesOptions indicesOptions() { - return indicesOptions; - } - - @SuppressWarnings("unchecked") - public T indicesOptions(IndicesOptions indicesOptions) { - if (indicesOptions == null) { - throw new IllegalArgumentException("IndicesOptions must not be null"); - } - this.indicesOptions = indicesOptions; - return (T) this; - } - - /** - * The indices the request will execute against. - */ - @SuppressWarnings("unchecked") - @Override - public final T indices(String[] indices) { - this.indices = indices; - return (T) this; - } - - public WriteConsistencyLevel consistencyLevel() { - return this.consistencyLevel; - } - - /** - * Sets the consistency level of write. 
Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT} - */ - @SuppressWarnings("unchecked") - public final T consistencyLevel(WriteConsistencyLevel consistencyLevel) { - if (consistencyLevel == null) { - throw new IllegalArgumentException("WriteConsistencyLevel must not be null"); - } - this.consistencyLevel = consistencyLevel; - return (T) this; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - consistencyLevel = WriteConsistencyLevel.fromId(in.readByte()); - timeout = TimeValue.readTimeValue(in); - indices = in.readStringArray(); - indicesOptions = IndicesOptions.readIndicesOptions(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeByte(consistencyLevel.id()); - timeout.writeTo(out); - out.writeStringArrayNullable(indices); - indicesOptions.writeIndicesOptions(out); - } -} diff --git a/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequestBuilder.java b/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequestBuilder.java deleted file mode 100644 index 75598a6d295..00000000000 --- a/src/main/java/org/elasticsearch/action/support/replication/IndicesReplicationOperationRequestBuilder.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.support.replication; - -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.WriteConsistencyLevel; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.unit.TimeValue; - -/** - */ -public abstract class IndicesReplicationOperationRequestBuilder, Response extends ActionResponse, RequestBuilder extends IndicesReplicationOperationRequestBuilder> - extends ActionRequestBuilder { - - protected IndicesReplicationOperationRequestBuilder(Client client, Request request) { - super(client, request); - } - - /** - * A timeout to wait if the index operation can't be performed immediately. Defaults to 1m. - */ - @SuppressWarnings("unchecked") - public final RequestBuilder setTimeout(TimeValue timeout) { - request.timeout(timeout); - return (RequestBuilder) this; - } - - /** - * A timeout to wait if the index operation can't be performed immediately. Defaults to 1m. - */ - @SuppressWarnings("unchecked") - public final RequestBuilder setTimeout(String timeout) { - request.timeout(timeout); - return (RequestBuilder) this; - } - - @SuppressWarnings("unchecked") - public final RequestBuilder setIndices(String... 
indices) { - request.indices(indices); - return (RequestBuilder) this; - } - - /** - * Specifies what type of requested indices to ignore and how to deal with wildcard indices expressions. - * For example indices that don't exist. - */ - @SuppressWarnings("unchecked") - public RequestBuilder setIndicesOptions(IndicesOptions indicesOptions) { - request().indicesOptions(indicesOptions); - return (RequestBuilder) this; - } - - /** - * Sets the consistency level of write. Defaults to {@link org.elasticsearch.action.WriteConsistencyLevel#DEFAULT} - */ - @SuppressWarnings("unchecked") - public RequestBuilder setConsistencyLevel(WriteConsistencyLevel consistencyLevel) { - request.consistencyLevel(consistencyLevel); - return (RequestBuilder) this; - } -} diff --git a/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequest.java b/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequest.java index dc7461020be..0d9730c246a 100644 --- a/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequest.java +++ b/src/main/java/org/elasticsearch/action/support/replication/ShardReplicationOperationRequest.java @@ -27,6 +27,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.shard.ShardId; import java.io.IOException; import java.util.concurrent.TimeUnit; @@ -40,8 +41,9 @@ public abstract class ShardReplicationOperationRequest - extends TransportAction { - - protected final ClusterService clusterService; - - protected final TransportShardReplicationOperationAction shardAction; - - protected TransportIndexReplicationOperationAction(Settings settings, String actionName, ClusterService clusterService, - ThreadPool threadPool, TransportShardReplicationOperationAction shardAction, ActionFilters actionFilters) { - super(settings, actionName, threadPool, actionFilters); - this.clusterService = clusterService; - this.shardAction = shardAction; - } - - @Override - protected void doExecute(final Request request, final ActionListener listener) { - ClusterState clusterState = clusterService.state(); - ClusterBlockException blockException = checkGlobalBlock(clusterState, request); - if (blockException != null) { - throw blockException; - } - blockException = checkRequestBlock(clusterState, request); - if (blockException != null) { - throw blockException; - } - - final GroupShardsIterator groups; - try { - groups = shards(request); - } catch (Throwable e) { - listener.onFailure(e); - return; - } - final AtomicInteger indexCounter = new AtomicInteger(); - final AtomicInteger failureCounter = new AtomicInteger(); - final AtomicInteger completionCounter = new AtomicInteger(groups.size()); - final AtomicReferenceArray shardsResponses = new AtomicReferenceArray<>(groups.size()); - - for (final ShardIterator shardIt : groups) { - final ShardRequest shardRequest = newShardRequestInstance(request, shardIt.shardId().id()); - shardRequest.operationThreaded(true); - // no need for threaded listener, we will fork when its done based on the index request - shardRequest.listenerThreaded(false); - shardAction.execute(shardRequest, new ActionListener() { - @Override - public void onResponse(ShardResponse result) { - shardsResponses.set(indexCounter.getAndIncrement(), new ShardActionResult(result)); - returnIfNeeded(); - } - - @Override - public 
void onFailure(Throwable e) { - failureCounter.getAndIncrement(); - int index = indexCounter.getAndIncrement(); - // this is a failure for an entire shard group, constructs shard info accordingly - final RestStatus status; - if (e != null && e instanceof ElasticsearchException) { - status = ((ElasticsearchException) e).status(); - } else { - status = RestStatus.INTERNAL_SERVER_ERROR; - } - Failure failure = new Failure(request.index(), shardIt.shardId().id(), null, - "Failed to execute on all shard copies [" + ExceptionsHelper.detailedMessage(e) + "]", status, true); - shardsResponses.set(index, new ShardActionResult(new ActionWriteResponse.ShardInfo(shardIt.size(), 0, failure))); - returnIfNeeded(); - } - - private void returnIfNeeded() { - if (completionCounter.decrementAndGet() == 0) { - List responses = new ArrayList<>(); - List failureList = new ArrayList<>(); - - int total = 0; - int successful = 0; - for (int i = 0; i < shardsResponses.length(); i++) { - ShardActionResult shardActionResult = shardsResponses.get(i); - final ActionWriteResponse.ShardInfo sf; - if (shardActionResult.isFailure()) { - assert shardActionResult.shardInfoOnFailure != null; - sf = shardActionResult.shardInfoOnFailure; - } else { - responses.add(shardActionResult.shardResponse); - sf = shardActionResult.shardResponse.getShardInfo(); - } - total += sf.getTotal(); - successful += sf.getSuccessful(); - failureList.addAll(Arrays.asList(sf.getFailures())); - } - assert failureList.size() == 0 || numShardGroupFailures(failureList) == failureCounter.get(); - - final Failure[] failures; - if (failureList.isEmpty()) { - failures = ActionWriteResponse.EMPTY; - } else { - failures = failureList.toArray(new Failure[failureList.size()]); - } - listener.onResponse(newResponseInstance(request, responses, new ActionWriteResponse.ShardInfo(total, successful, failures))); - } - } - - private int numShardGroupFailures(List failures) { - int numShardGroupFailures = 0; - for (Failure failure : failures) { - if (failure.primary()) { - numShardGroupFailures++; - } - } - return numShardGroupFailures; - } - }); - - } - } - - protected abstract Response newResponseInstance(Request request, List shardResponses, ActionWriteResponse.ShardInfo shardInfo); - - protected abstract GroupShardsIterator shards(Request request) throws ElasticsearchException; - - protected abstract ShardRequest newShardRequestInstance(Request request, int shardId); - - protected ClusterBlockException checkGlobalBlock(ClusterState state, Request request) { - return state.blocks().globalBlockedException(ClusterBlockLevel.WRITE); - } - - protected ClusterBlockException checkRequestBlock(ClusterState state, Request request) { - return state.blocks().indexBlockedException(ClusterBlockLevel.WRITE, request.index()); - } - - private class ShardActionResult { - - private final ShardResponse shardResponse; - private final ActionWriteResponse.ShardInfo shardInfoOnFailure; - - private ShardActionResult(ShardResponse shardResponse) { - assert shardResponse != null; - this.shardResponse = shardResponse; - this.shardInfoOnFailure = null; - } - - private ShardActionResult(ActionWriteResponse.ShardInfo shardInfoOnFailure) { - assert shardInfoOnFailure != null; - this.shardInfoOnFailure = shardInfoOnFailure; - this.shardResponse = null; - } - - boolean isFailure() { - return shardInfoOnFailure != null; - } - } -} diff --git a/src/main/java/org/elasticsearch/action/support/replication/TransportIndicesReplicationOperationAction.java 
b/src/main/java/org/elasticsearch/action/support/replication/TransportIndicesReplicationOperationAction.java deleted file mode 100644 index e2a811202c1..00000000000 --- a/src/main/java/org/elasticsearch/action/support/replication/TransportIndicesReplicationOperationAction.java +++ /dev/null @@ -1,168 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.action.support.replication; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.ActionWriteResponse; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.TransportAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.BaseTransportRequestHandler; -import org.elasticsearch.transport.TransportChannel; -import org.elasticsearch.transport.TransportService; - -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReferenceArray; - -/** - */ -public abstract class TransportIndicesReplicationOperationAction - extends TransportAction { - - protected final ClusterService clusterService; - - protected final TransportIndexReplicationOperationAction indexAction; - - protected TransportIndicesReplicationOperationAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, - TransportIndexReplicationOperationAction indexAction, ActionFilters actionFilters) { - super(settings, actionName, threadPool, actionFilters); - this.clusterService = clusterService; - this.indexAction = indexAction; - - transportService.registerHandler(actionName, new TransportHandler()); - } - - - protected abstract Map> resolveRouting(ClusterState clusterState, Request request) throws ElasticsearchException; - - @Override - protected void doExecute(final Request request, final ActionListener listener) { - ClusterState clusterState = clusterService.state(); - ClusterBlockException blockException = checkGlobalBlock(clusterState, request); - if (blockException != null) { - throw blockException; - } - // get actual indices - String[] concreteIndices = clusterState.metaData().concreteIndices(request.indicesOptions(), request.indices()); - blockException = checkRequestBlock(clusterState, request, concreteIndices); - if (blockException != null) { - throw blockException; - } - - final AtomicInteger indexCounter = new AtomicInteger(); - final 
AtomicInteger completionCounter = new AtomicInteger(concreteIndices.length); - final AtomicReferenceArray indexResponses = new AtomicReferenceArray<>(concreteIndices.length); - final long startTimeInMillis = System.currentTimeMillis(); - - Map> routingMap = resolveRouting(clusterState, request); - if (concreteIndices.length == 0) { - listener.onResponse(newResponseInstance(request, indexResponses)); - } else { - for (final String index : concreteIndices) { - Set routing = null; - if (routingMap != null) { - routing = routingMap.get(index); - } - IndexRequest indexRequest = newIndexRequestInstance(request, index, routing, startTimeInMillis); - // no threading needed, all is done on the index replication one - indexRequest.listenerThreaded(false); - indexAction.execute(indexRequest, new ActionListener() { - @Override - public void onResponse(IndexResponse result) { - indexResponses.set(indexCounter.getAndIncrement(), result); - if (completionCounter.decrementAndGet() == 0) { - listener.onResponse(newResponseInstance(request, indexResponses)); - } - } - - @Override - public void onFailure(Throwable e) { - int index = indexCounter.getAndIncrement(); - if (accumulateExceptions()) { - indexResponses.set(index, e); - } - if (completionCounter.decrementAndGet() == 0) { - listener.onResponse(newResponseInstance(request, indexResponses)); - } - } - }); - } - } - } - - protected abstract Request newRequestInstance(); - - protected abstract Response newResponseInstance(Request request, AtomicReferenceArray indexResponses); - - protected abstract IndexRequest newIndexRequestInstance(Request request, String index, Set routing, long startTimeInMillis); - - protected abstract boolean accumulateExceptions(); - - protected abstract ClusterBlockException checkGlobalBlock(ClusterState state, Request request); - - protected abstract ClusterBlockException checkRequestBlock(ClusterState state, Request request, String[] concreteIndices); - - private class TransportHandler extends BaseTransportRequestHandler { - - @Override - public Request newInstance() { - return newRequestInstance(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - - @Override - public void messageReceived(final Request request, final TransportChannel channel) throws Exception { - // no need for a threaded listener, since we just send a response - request.listenerThreaded(false); - execute(request, new ActionListener() { - @Override - public void onResponse(Response result) { - try { - channel.sendResponse(result); - } catch (Throwable e) { - onFailure(e); - } - } - - @Override - public void onFailure(Throwable e) { - try { - channel.sendResponse(e); - } catch (Exception e1) { - logger.warn("Failed to send error response for action [" + actionName + "] and request [" + request + "]", e1); - } - } - }); - } - } -} diff --git a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java index df99d045177..0e488a602ff 100644 --- a/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/replication/TransportShardReplicationOperationAction.java @@ -20,12 +20,12 @@ package org.elasticsearch.action.support.replication; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import 
org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; -import org.elasticsearch.action.*; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionWriteResponse; +import org.elasticsearch.action.UnavailableShardsException; +import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.ClusterService; @@ -36,11 +36,13 @@ import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.ShardIterator; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; @@ -49,12 +51,21 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.engine.DocumentAlreadyExistsException; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.IndexShardException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.node.NodeClosedException; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.*; +import org.elasticsearch.transport.BaseTransportResponseHandler; +import org.elasticsearch.transport.ConnectTransportException; +import org.elasticsearch.transport.EmptyTransportResponseHandler; +import org.elasticsearch.transport.TransportChannel; +import org.elasticsearch.transport.TransportException; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.TransportRequestOptions; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportService; import java.io.IOException; import java.util.Map; @@ -79,7 +90,8 @@ public abstract class TransportShardReplicationOperationAction request, Class replicaRequest, String executor) { super(settings, actionName, threadPool, actionFilters); this.transportService = transportService; this.clusterService = clusterService; @@ -87,11 +99,12 @@ public abstract class TransportShardReplicationOperationAction listener) { - new AsyncShardOperationAction(request, listener).start(); + new PrimaryPhase(request, listener).run(); } - protected abstract Request newRequestInstance(); - - protected abstract ReplicaRequest newReplicaRequestInstance(); - protected abstract Response newResponseInstance(); - protected abstract String executor(); - /** - * @return A tuple containing not null values, as first value the result of the primary operation and as second value - * the 
request to be executed on the replica shards. + * @return A tuple containing not null values, as first value the result of the primary operation and as second value + * the request to be executed on the replica shards. */ - protected abstract Tuple shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable; + protected abstract Tuple shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable; - protected abstract void shardOperationOnReplica(ReplicaOperationRequest shardRequest); + protected abstract void shardOperationOnReplica(ShardId shardId, ReplicaRequest shardRequest); - protected abstract ShardIterator shards(ClusterState clusterState, InternalRequest request) throws ElasticsearchException; + protected abstract ShardIterator shards(ClusterState clusterState, InternalRequest request); protected abstract boolean checkWriteConsistency(); @@ -176,18 +183,7 @@ public abstract class TransportShardReplicationOperationAction { - - @Override - public Request newInstance() { - return newRequestInstance(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - + class OperationTransportHandler implements TransportRequestHandler { @Override public void messageReceived(final Request request, final TransportChannel channel) throws Exception { // no need to have a threaded listener since we just send back a response @@ -216,39 +212,87 @@ public abstract class TransportShardReplicationOperationAction { - + class ReplicaOperationTransportHandler implements TransportRequestHandler { @Override - public ReplicaOperationRequest newInstance() { - return new ReplicaOperationRequest(); + public void messageReceived(final ReplicaRequest request, final TransportChannel channel) throws Exception { + new AsyncReplicaAction(request, channel).run(); + } + } + + protected static class RetryOnReplicaException extends IndexShardException { + + public RetryOnReplicaException(ShardId shardId, String msg) { + super(shardId, msg); + } + + public RetryOnReplicaException(ShardId shardId, String msg, Throwable cause) { + super(shardId, msg, cause); + } + } + + private final class AsyncReplicaAction extends AbstractRunnable { + private final ReplicaRequest request; + private final TransportChannel channel; + // important: we pass null as a timeout as failing a replica is + // something we want to avoid at all costs + private final ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger); + + + AsyncReplicaAction(ReplicaRequest request, TransportChannel channel) { + this.request = request; + this.channel = channel; } @Override - public String executor() { - return executor; - } + public void onFailure(Throwable t) { + if (t instanceof RetryOnReplicaException) { + logger.trace("Retrying operation on replica", t); + observer.waitForNextChange(new ClusterStateObserver.Listener() { + @Override + public void onNewClusterState(ClusterState state) { + threadPool.executor(executor).execute(AsyncReplicaAction.this); + } - // we must never reject on because of thread pool capacity on replicas - @Override - public boolean isForceExecution() { - return true; - } + @Override + public void onClusterServiceClose() { + responseWithFailure(new NodeClosedException(clusterService.localNode())); + } - @Override - public void messageReceived(final ReplicaOperationRequest request, final TransportChannel channel) throws Exception { - try { - shardOperationOnReplica(request); - } catch (Throwable 
t) { - failReplicaIfNeeded(request.shardId.getIndex(), request.shardId.id(), t); - throw t; + @Override + public void onTimeout(TimeValue timeout) { + throw new AssertionError("Cannot happen: there is no timeout"); + } + }); + } else { + try { + failReplicaIfNeeded(request.internalShardId.getIndex(), request.internalShardId.id(), t); + } catch (Throwable unexpected) { + logger.error("{} unexpected error while failing replica", request.internalShardId.id(), unexpected); + } finally { + responseWithFailure(t); + } } + } + + protected void responseWithFailure(Throwable t) { + try { + channel.sendResponse(t); + } catch (IOException responseException) { + logger.warn("failed to send error message back to client for action [" + transportReplicaAction + "]", responseException); + logger.warn("actual Exception", t); + } + } + + @Override + protected void doRun() throws Exception { + shardOperationOnReplica(request.internalShardId, request); channel.sendResponse(TransportResponse.Empty.INSTANCE); } } protected class PrimaryOperationRequest { - public ShardId shardId; - public Request request; + public final ShardId shardId; + public final Request request; public PrimaryOperationRequest(int shardId, String index, Request request) { this.shardId = new ShardId(index, shardId); @@ -256,195 +300,163 @@ public abstract class TransportShardReplicationOperationAction= 0; - //older nodes will send the concrete index as part of the request - shardId = new ShardId(request.index(), shard); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - shardId.writeTo(out); - request.writeTo(out); - } - } - - protected class AsyncShardOperationAction { + /** + * Responsible for performing all operations up to the point we start sending requests to replica shards. + * Including forwarding the request to another node if the primary is not assigned locally. + *

+ Note that as soon as we start sending requests to replicas, state responsibility is transferred to {@link ReplicationPhase} + */ + final class PrimaryPhase extends AbstractRunnable { private final ActionListener listener; private final InternalRequest internalRequest; - private volatile ShardIterator shardIt; - private final AtomicBoolean primaryOperationStarted = new AtomicBoolean(); - private volatile ClusterStateObserver observer; + private final ClusterStateObserver observer; + private final AtomicBoolean finished = new AtomicBoolean(false); - AsyncShardOperationAction(Request request, ActionListener listener) { + + PrimaryPhase(Request request, ActionListener listener) { this.internalRequest = new InternalRequest(request); this.listener = listener; - } - - public void start() { this.observer = new ClusterStateObserver(clusterService, internalRequest.request().timeout(), logger); - doStart(); } - /** - * Returns true if the action starting to be performed on the primary (or is done). - */ - protected void doStart() throws ElasticsearchException { - try { - ClusterBlockException blockException = checkGlobalBlock(observer.observedState()); - if (blockException != null) { - if (blockException.retryable()) { - logger.trace("cluster is blocked ({}), scheduling a retry", blockException.getMessage()); - retry(blockException); - return; - } else { - throw blockException; - } - } - if (resolveIndex()) { - internalRequest.concreteIndex(observer.observedState().metaData().concreteSingleIndex(internalRequest.request().index(), internalRequest.request().indicesOptions())); - } else { - internalRequest.concreteIndex(internalRequest.request().index()); - } + @Override + public void onFailure(Throwable e) { + finishWithUnexpectedFailure(e); + } - resolveRequest(observer.observedState(), internalRequest, listener); - - blockException = checkRequestBlock(observer.observedState(), internalRequest); - if (blockException != null) { - if (blockException.retryable()) { - logger.trace("cluster is blocked ({}), scheduling a retry", blockException.getMessage()); - retry(blockException); - return; - } else { - throw blockException; - } - } - shardIt = shards(observer.observedState(), internalRequest); - } catch (Throwable e) { - listener.onFailure(e); + protected void doRun() { + if (checkBlocks() == false) { return; } - - // no shardIt, might be in the case between index gateway recovery and shardIt initialization - if (shardIt.size() == 0) { - logger.trace("no shard instances known for shard [{}], scheduling a retry", shardIt.shardId()); + final ShardIterator shardIt = shards(observer.observedState(), internalRequest); + final ShardRouting primary = resolvePrimary(shardIt); + if (primary == null) { retryBecauseUnavailable(shardIt.shardId(), "No active shards."); return; } + if (primary.active() == false) { + logger.trace("primary shard [{}] is not yet active, scheduling a retry.", primary.shardId()); + retryBecauseUnavailable(shardIt.shardId(), "Primary shard is not active or isn't assigned to a known node."); + return; + } + if (observer.observedState().nodes().nodeExists(primary.currentNodeId()) == false) { + logger.trace("primary shard [{}] is assigned to a node we do not know [{}], scheduling a retry.", primary.shardId(), primary.currentNodeId()); + retryBecauseUnavailable(shardIt.shardId(), "Primary shard is not active or isn't assigned to a known node."); + return; + } + routeRequestOrPerformLocally(primary, shardIt); + } - boolean foundPrimary = false; - ShardRouting shardX; - while ((shardX =
shardIt.nextOrNull()) != null) { - final ShardRouting shard = shardX; - // we only deal with primary shardIt here... - if (!shard.primary()) { - continue; - } - if (!shard.active() || !observer.observedState().nodes().nodeExists(shard.currentNodeId())) { - logger.trace("primary shard [{}] is not yet active or we do not know the node it is assigned to [{}], scheduling a retry.", shard.shardId(), shard.currentNodeId()); - retryBecauseUnavailable(shardIt.shardId(), "Primary shard is not active or isn't assigned to a known node."); - return; - } - - if (!primaryOperationStarted.compareAndSet(false, true)) { - return; - } - - foundPrimary = true; - if (shard.currentNodeId().equals(observer.observedState().nodes().localNodeId())) { - try { - if (internalRequest.request().operationThreaded()) { - threadPool.executor(executor).execute(new Runnable() { - @Override - public void run() { - try { - performOnPrimary(shard.id(), shard); - } catch (Throwable t) { - listener.onFailure(t); - } - } - }); - } else { - performOnPrimary(shard.id(), shard); - } - } catch (Throwable t) { - listener.onFailure(t); - } + /** + * Checks for any cluster state blocks. Returns true if the operation is OK to proceed. + * If false is returned, no further action is needed; the method takes care of any continuation, by either + * responding to the listener or scheduling a retry + */ + protected boolean checkBlocks() { + ClusterBlockException blockException = checkGlobalBlock(observer.observedState()); + if (blockException != null) { + if (blockException.retryable()) { + logger.trace("cluster is blocked ({}), scheduling a retry", blockException.getMessage()); + retry(blockException); } else { - DiscoveryNode node = observer.observedState().nodes().get(shard.currentNodeId()); - transportService.sendRequest(node, actionName, internalRequest.request(), transportOptions, new BaseTransportResponseHandler() { + finishAsFailed(blockException); + } + return false; + } + if (resolveIndex()) { + internalRequest.concreteIndex(observer.observedState().metaData().concreteSingleIndex(internalRequest.request().index(), internalRequest.request().indicesOptions())); + } else { + internalRequest.concreteIndex(internalRequest.request().index()); + } - @Override - public Response newInstance() { - return newResponseInstance(); - } + resolveRequest(observer.observedState(), internalRequest, listener); - @Override - public String executor() { - return ThreadPool.Names.SAME; - } + blockException = checkRequestBlock(observer.observedState(), internalRequest); + if (blockException != null) { + if (blockException.retryable()) { + logger.trace("cluster is blocked ({}), scheduling a retry", blockException.getMessage()); + retry(blockException); + } else { + finishAsFailed(blockException); + } + return false; + } + return true; + } - @Override - public void handleResponse(Response response) { - listener.onResponse(response); - } + protected ShardRouting resolvePrimary(ShardIterator shardIt) { + // no shardIt, might be in the case between index gateway recovery and shardIt initialization + ShardRouting shard; + while ((shard = shardIt.nextOrNull()) != null) { + // we only deal with primary shardIt here... 
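// A minimal sketch (editorial illustration, not part of this patch) of the ClusterStateObserver
// retry pattern that PrimaryPhase.retry(...) and the AsyncReplicaAction earlier in this file both
// rely on instead of the old primaryOperationStarted flag; `failure` is a hypothetical stand-in
// for the exception that triggered the retry:
//
//     observer.waitForNextChange(new ClusterStateObserver.Listener() {
//         @Override
//         public void onNewClusterState(ClusterState state) {
//             run(); // re-run the whole phase against the fresh cluster state
//         }
//
//         @Override
//         public void onClusterServiceClose() {
//             listener.onFailure(new NodeClosedException(clusterService.localNode()));
//         }
//
//         @Override
//         public void onTimeout(TimeValue timeout) {
//             listener.onFailure(failure); // give up once the request timeout elapses
//         }
//     });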
+ if (shard.primary()) { + return shard; + } + } + return null; + } - @Override - public void handleException(TransportException exp) { + /** send the request to the node holding the primary or execute if local */ + protected void routeRequestOrPerformLocally(final ShardRouting primary, final ShardIterator shardsIt) { + if (primary.currentNodeId().equals(observer.observedState().nodes().localNodeId())) { + try { + if (internalRequest.request().operationThreaded()) { + threadPool.executor(executor).execute(new AbstractRunnable() { + @Override + public void onFailure(Throwable t) { + finishAsFailed(t); + } + + @Override + protected void doRun() throws Exception { + performOnPrimary(primary, shardsIt); + } + }); + } else { + performOnPrimary(primary, shardsIt); + } + } catch (Throwable t) { + // no commit: check threadpool rejection. + finishAsFailed(t); + } + } else { + DiscoveryNode node = observer.observedState().nodes().get(primary.currentNodeId()); + transportService.sendRequest(node, actionName, internalRequest.request(), transportOptions, new BaseTransportResponseHandler() { + + @Override + public Response newInstance() { + return newResponseInstance(); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public void handleResponse(Response response) { + finishOnRemoteSuccess(response); + } + + @Override + public void handleException(TransportException exp) { + try { // if we got disconnected from the node, or the node / shard is not in the right state (being closed) if (exp.unwrapCause() instanceof ConnectTransportException || exp.unwrapCause() instanceof NodeClosedException || retryPrimaryException(exp)) { - primaryOperationStarted.set(false); internalRequest.request().setCanHaveDuplicates(); // we already marked it as started when we executed it (removed the listener) so pass false // to re-add to the cluster listener logger.trace("received an error from node the primary was assigned to ({}), scheduling a retry", exp.getMessage()); retry(exp); } else { - listener.onFailure(exp); + finishAsFailed(exp); } + } catch (Throwable t) { + finishWithUnexpectedFailure(t); } - }); - } - break; - } - // we won't find a primary if there are no shards in the shard iterator, retry... 
- if (!foundPrimary) { - logger.trace("couldn't find a eligible primary shard, scheduling for retry."); - retryBecauseUnavailable(shardIt.shardId(), "No active shards."); + } + }); } } @@ -452,7 +464,7 @@ public abstract class TransportShardReplicationOperationAction primaryResponse = shardOperationOnPrimary(clusterState, por); - performReplicas(por, primaryResponse); + PrimaryOperationRequest por = new PrimaryOperationRequest(primary.id(), internalRequest.concreteIndex(), internalRequest.request()); + Tuple primaryResponse = shardOperationOnPrimary(observer.observedState(), por); + logger.trace("operation completed on primary [{}]", primary); + replicationPhase = new ReplicationPhase(shardsIt, primaryResponse.v2(), primaryResponse.v1(), observer, primary, internalRequest, listener); } catch (Throwable e) { internalRequest.request.setCanHaveDuplicates(); // shard has not been allocated yet, retry it here if (retryPrimaryException(e)) { - primaryOperationStarted.set(false); logger.trace("had an error while performing operation on primary ({}), scheduling a retry.", e.getMessage()); retry(e); return; } - if (e instanceof ElasticsearchException && ((ElasticsearchException) e).status() == RestStatus.CONFLICT) { + if (ExceptionsHelper.status(e) == RestStatus.CONFLICT) { if (logger.isTraceEnabled()) { - logger.trace(shard.shortSummary() + ": Failed to execute [" + internalRequest.request() + "]", e); + logger.trace(primary.shortSummary() + ": Failed to execute [" + internalRequest.request() + "]", e); } } else { if (logger.isDebugEnabled()) { - logger.debug(shard.shortSummary() + ": Failed to execute [" + internalRequest.request() + "]", e); + logger.debug(primary.shortSummary() + ": Failed to execute [" + internalRequest.request() + "]", e); } } - listener.onFailure(e); - } - } - - void performReplicas(PrimaryOperationRequest por, Tuple primaryResponse) { - ShardRouting shard; - // we double check on the state, if it got changed we need to make sure we take the latest one cause - // maybe a replica shard started its recovery process and we need to apply it there... - - // we also need to make sure if the new state has a new primary shard (that we indexed to before) started - // and assigned to another node (while the indexing happened). In that case, we want to apply it on the - // new primary shard as well... 
- ClusterState newState = clusterService.state(); - ShardRouting newPrimaryShard = null; - int numberOfUnassignedReplicas = 0; - if (observer.observedState() != newState) { - shardIt.reset(); - ShardRouting originalPrimaryShard = null; - while ((shard = shardIt.nextOrNull()) != null) { - if (shard.primary()) { - originalPrimaryShard = shard; - break; - } - } - if (originalPrimaryShard == null || !originalPrimaryShard.active()) { - throw new ElasticsearchIllegalStateException("unexpected state, failed to find primary shard on an index operation that succeeded"); - } - - observer.reset(newState); - shardIt = shards(newState, internalRequest); - while ((shard = shardIt.nextOrNull()) != null) { - if (shard.primary()) { - if (originalPrimaryShard.currentNodeId().equals(shard.currentNodeId())) { - newPrimaryShard = null; - } else { - newPrimaryShard = shard; - } - } - - if (!shard.primary() && shard.unassigned()) { - numberOfUnassignedReplicas++; - } - } - shardIt.reset(); - internalRequest.request().setCanHaveDuplicates(); // safe side, cluster state changed, we might have dups - } else { - shardIt.reset(); - while ((shard = shardIt.nextOrNull()) != null) { - if (shard.state() != ShardRoutingState.STARTED) { - internalRequest.request().setCanHaveDuplicates(); - } - if (!shard.primary() && shard.unassigned()) { - numberOfUnassignedReplicas++; - } - } - shardIt.reset(); - } - - int numberOfPendingShardInstances = shardIt.assignedReplicasIncludingRelocating(); - if (newPrimaryShard != null) { - numberOfPendingShardInstances++; - } - ReplicationState replicationState = new ReplicationState(por, shardIt, primaryResponse.v1(), primaryResponse.v2(), listener, numberOfPendingShardInstances, numberOfUnassignedReplicas); - if (numberOfPendingShardInstances == 0) { - replicationState.forceFinish(); + finishAsFailed(e); return; } - IndexMetaData indexMetaData = observer.observedState().metaData().index(internalRequest.concreteIndex()); - if (newPrimaryShard != null) { - performOnReplica(replicationState, newPrimaryShard, newPrimaryShard.currentNodeId(), indexMetaData); - } - - shardIt.reset(); // reset the iterator - while ((shard = shardIt.nextOrNull()) != null) { - // if its unassigned, nothing to do here... - if (shard.unassigned()) { - continue; - } - - // if the shard is primary and relocating, add one to the counter since we perform it on the replica as well - // (and we already did it on the primary) - boolean doOnlyOnRelocating = false; - if (shard.primary()) { - if (shard.relocating()) { - doOnlyOnRelocating = true; - } else { - continue; - } - } - // we index on a replica that is initializing as well since we might not have got the event - // yet that it was started. 
We will get an exception IllegalShardState exception if its not started - // and that's fine, we will ignore it - if (!doOnlyOnRelocating) { - performOnReplica(replicationState, shard, shard.currentNodeId(), indexMetaData); - } - if (shard.relocating()) { - performOnReplica(replicationState, shard, shard.relocatingNodeId(), indexMetaData); - } - } + finishAndMoveToReplication(replicationPhase); } - void performOnReplica(final ReplicationState state, final ShardRouting shard, final String nodeId, final IndexMetaData indexMetaData) { - // if we don't have that node, it means that it might have failed and will be created again, in - // this case, we don't have to do the operation, and just let it failover - if (!observer.observedState().nodes().nodeExists(nodeId)) { - state.onReplicaFailure(nodeId, null); - return; - } - - final ReplicaOperationRequest shardRequest = new ReplicaOperationRequest(shardIt.shardId(), state.replicaRequest()); - - // If the replicas use shadow replicas, there is no reason to - // perform the action on the replica, so skip it and - // immediately return - if (IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.settings())) { - // this delays mapping updates on replicas because they have - // to wait until they get the new mapping through the cluster - // state, which is why we recommend pre-defined mappings for - // indices using shadow replicas - state.onReplicaSuccess(); - return; - } - - if (!nodeId.equals(observer.observedState().nodes().localNodeId())) { - final DiscoveryNode node = observer.observedState().nodes().get(nodeId); - transportService.sendRequest(node, transportReplicaAction, shardRequest, - transportOptions, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - @Override - public void handleResponse(TransportResponse.Empty vResponse) { - state.onReplicaSuccess(); - } - - @Override - public void handleException(TransportException exp) { - state.onReplicaFailure(nodeId, exp); - logger.trace("[{}] Transport failure during replica request [{}] ", exp, node, internalRequest.request()); - if (!ignoreReplicaException(exp)) { - logger.warn("Failed to perform " + actionName + " on remote replica " + node + shardIt.shardId(), exp); - shardStateAction.shardFailed(shard, indexMetaData.getUUID(), - "Failed to perform [" + actionName + "] on replica, message [" + ExceptionsHelper.detailedMessage(exp) + "]"); - } - } - - }); - } else { - if (internalRequest.request().operationThreaded()) { - try { - threadPool.executor(executor).execute(new AbstractRunnable() { - @Override - protected void doRun() { - try { - shardOperationOnReplica(shardRequest); - state.onReplicaSuccess(); - } catch (Throwable e) { - state.onReplicaFailure(nodeId, e); - failReplicaIfNeeded(shard.index(), shard.id(), e); - } - } - - // we must never reject on because of thread pool capacity on replicas - @Override - public boolean isForceExecution() { - return true; - } - - @Override - public void onFailure(Throwable t) { - state.onReplicaFailure(nodeId, t); - } - }); - } catch (Throwable e) { - failReplicaIfNeeded(shard.index(), shard.id(), e); - state.onReplicaFailure(nodeId, e); - } - } else { - try { - shardOperationOnReplica(shardRequest); - state.onReplicaSuccess(); - } catch (Throwable e) { - failReplicaIfNeeded(shard.index(), shard.id(), e); - state.onReplicaFailure(nodeId, e); - } - } - } - } - - boolean raiseFailureIfHaveNotEnoughActiveShardCopies(ShardRouting shard, ClusterState state) { - if (!checkWriteConsistency) { - return false; + /** + * checks whether we can perform 
a write based on the write consistency setting + * returns *null* if OK to proceed, or a string describing the reason to stop + */ + String checkWriteConsistency(ShardRouting shard) { + if (checkWriteConsistency == false) { + return null; } final WriteConsistencyLevel consistencyLevel; @@ -703,11 +579,11 @@ public abstract class TransportShardReplicationOperationAction 2) { // only for more than 2 in the number of shardIt it makes sense, otherwise its 1 shard with 1 replica, quorum is 1 (which is what it is initialized to) requiredNumber = (shardRoutingTable.getSize() / 2) + 1; @@ -728,24 +604,21 @@ public abstract class TransportShardReplicationOperationAction listener; private final AtomicBoolean finished = new AtomicBoolean(false); private final AtomicInteger success = new AtomicInteger(1); // We already wrote into the primary shard private final ConcurrentMap shardReplicaFailures = ConcurrentCollections.newConcurrentMap(); - + private final IndexMetaData indexMetaData; + private final ShardRouting originalPrimaryShard; private final AtomicInteger pending; - private final int numberOfShardInstances; + private final int totalShards; + private final ClusterStateObserver observer; - public ReplicationState(PrimaryOperationRequest por, ShardIterator shardsIter, Response finalResponse, ReplicaRequest replicaRequest, ActionListener listener, int numberOfPendingShardInstances, int numberOfUnassignedReplicas) { - this.request = por.request; - this.finalResponse = finalResponse; + /** + * the constructor doesn't take any action, just calculates state. Call {@link #run()} to start + * replicating. + */ + public ReplicationPhase(ShardIterator originalShardIt, ReplicaRequest replicaRequest, Response finalResponse, + ClusterStateObserver observer, ShardRouting originalPrimaryShard, + InternalRequest internalRequest, ActionListener listener) { this.replicaRequest = replicaRequest; - this.shardId = shardsIter.shardId(); this.listener = listener; - this.numberOfShardInstances = 1 + numberOfPendingShardInstances + numberOfUnassignedReplicas; + this.finalResponse = finalResponse; + this.originalPrimaryShard = originalPrimaryShard; + this.observer = observer; + indexMetaData = observer.observedState().metaData().index(internalRequest.concreteIndex()); + + ShardRouting shard; + // we double check on the state, if it got changed we need to make sure we take the latest one because + // maybe a replica shard started its recovery process and we need to apply it there... + + // we also need to make sure if the new state has a new primary shard (that we indexed to before) started + // and assigned to another node (while the indexing happened). In that case, we want to apply it on the + // new primary shard as well... + ClusterState newState = clusterService.state(); + + int numberOfUnassignedOrShadowReplicas = 0; + int numberOfPendingShardInstances = 0; + if (observer.observedState() != newState) { + observer.reset(newState); + shardIt = shards(newState, internalRequest); + while ((shard = shardIt.nextOrNull()) != null) { + if (shard.primary()) { + if (originalPrimaryShard.currentNodeId().equals(shard.currentNodeId()) == false) { + // there is a new primary, we'll have to replicate to it. 
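+ // (the primary phase has already written to the old primary copy, so the new primary is simply counted here as one more pending copy to replicate to)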
+ numberOfPendingShardInstances++; + } + if (shard.relocating()) { + numberOfPendingShardInstances++; + } + } else if (IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.settings())) { + // If the index uses shadow replicas, there is no reason to + // perform the action on the replica, so skip it when counting + + // this delays mapping updates on replicas because they have + // to wait until they get the new mapping through the cluster + // state, which is why we recommend pre-defined mappings for + // indices using shadow replicas + numberOfUnassignedOrShadowReplicas++; + } else if (shard.unassigned()) { + numberOfUnassignedOrShadowReplicas++; + } else if (shard.relocating()) { + // we need to send to two copies + numberOfPendingShardInstances += 2; + } else { + numberOfPendingShardInstances++; + } + } + internalRequest.request().setCanHaveDuplicates(); // safe side, cluster state changed, we might have dups + } else { + shardIt = originalShardIt; + shardIt.reset(); + while ((shard = shardIt.nextOrNull()) != null) { + if (shard.state() != ShardRoutingState.STARTED) { + replicaRequest.setCanHaveDuplicates(); + } + if (shard.unassigned()) { + numberOfUnassignedOrShadowReplicas++; + } else if (shard.primary()) { + if (shard.relocating()) { + // we have to replicate to the other copy + numberOfPendingShardInstances += 1; + } + } else if (IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.settings())) { + // If the index uses shadow replicas, there is no reason to + // perform the action on the replica, so skip it when counting + + // this delays mapping updates on replicas because they have + // to wait until they get the new mapping through the cluster + // state, which is why we recommend pre-defined mappings for + // indices using shadow replicas + numberOfUnassignedOrShadowReplicas++; + } else if (shard.relocating()) { + // we need to send to two copies + numberOfPendingShardInstances += 2; + } else { + numberOfPendingShardInstances++; + } + } + } + + // one for the primary already done + this.totalShards = 1 + numberOfPendingShardInstances + numberOfUnassignedOrShadowReplicas; this.pending = new AtomicInteger(numberOfPendingShardInstances); } - public Request request() { - return this.request; + /** total shard copies */ + int totalShards() { + return totalShards; } - public ReplicaRequest replicaRequest() { - return this.replicaRequest; + /** total successful operations so far */ + int successful() { + return success.get(); } - public void onReplicaFailure(String nodeId, @Nullable Throwable e) { + /** number of pending operations */ + int pending() { + return pending.get(); + } + + @Override + public void onFailure(Throwable t) { + logger.error("unexpected error while replicating for action [{}], shard [{}]", t, actionName, shardIt.shardId()); + forceFinishAsFailed(t); + } + + /** start sending current requests to replicas */ + @Override + protected void doRun() { + if (pending.get() == 0) { + doFinish(); + return; + } + ShardRouting shard; + shardIt.reset(); // reset the iterator + while ((shard = shardIt.nextOrNull()) != null) { + // if it's unassigned, nothing to do here... + if (shard.unassigned()) { + continue; + } + + // we index on a replica that is initializing as well since we might not have got the event + // yet that it was started. 
We will get an IllegalShardState exception if it's not started + // and that's fine, we will ignore it + if (shard.primary()) { + if (originalPrimaryShard.currentNodeId().equals(shard.currentNodeId()) == false) { + // there is a new primary, we'll have to replicate to it. + performOnReplica(shard, shard.currentNodeId()); + } + if (shard.relocating()) { + performOnReplica(shard, shard.relocatingNodeId()); + } + } else if (IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.settings()) == false) { + performOnReplica(shard, shard.currentNodeId()); + if (shard.relocating()) { + performOnReplica(shard, shard.relocatingNodeId()); + } + } + } + } + + /** send operation to the given node or perform it if local */ + void performOnReplica(final ShardRouting shard, final String nodeId) { + // if we don't have that node, it means that it might have failed and will be created again, in + // this case, we don't have to do the operation, and just let it failover + if (!observer.observedState().nodes().nodeExists(nodeId)) { + onReplicaFailure(nodeId, null); + return; + } + + replicaRequest.internalShardId = shardIt.shardId(); + + if (!nodeId.equals(observer.observedState().nodes().localNodeId())) { + final DiscoveryNode node = observer.observedState().nodes().get(nodeId); + transportService.sendRequest(node, transportReplicaAction, replicaRequest, + transportOptions, new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + @Override + public void handleResponse(TransportResponse.Empty vResponse) { + onReplicaSuccess(); + } + + @Override + public void handleException(TransportException exp) { + onReplicaFailure(nodeId, exp); + logger.trace("[{}] transport failure during replica request [{}]", exp, node, replicaRequest); + if (ignoreReplicaException(exp) == false) { + logger.warn("failed to perform " + actionName + " on remote replica " + node + " " + shardIt.shardId(), exp); + shardStateAction.shardFailed(shard, indexMetaData.getUUID(), + "Failed to perform [" + actionName + "] on replica, message [" + ExceptionsHelper.detailedMessage(exp) + "]"); + } + } + + }); + } else { + if (replicaRequest.operationThreaded()) { + try { + threadPool.executor(executor).execute(new AbstractRunnable() { + @Override + protected void doRun() { + try { + shardOperationOnReplica(shard.shardId(), replicaRequest); + onReplicaSuccess(); + } catch (Throwable e) { + onReplicaFailure(nodeId, e); + failReplicaIfNeeded(shard.index(), shard.id(), e); + } + } + + // we must never reject because of thread pool capacity on replicas + @Override + public boolean isForceExecution() { + return true; + } + + @Override + public void onFailure(Throwable t) { + onReplicaFailure(nodeId, t); + } + }); + } catch (Throwable e) { + failReplicaIfNeeded(shard.index(), shard.id(), e); + onReplicaFailure(nodeId, e); + } + } else { + try { + shardOperationOnReplica(shard.shardId(), replicaRequest); + onReplicaSuccess(); + } catch (Throwable e) { + failReplicaIfNeeded(shard.index(), shard.id(), e); + onReplicaFailure(nodeId, e); + } + } + } + } + + + void onReplicaFailure(String nodeId, @Nullable Throwable e) { // Only version conflict should be ignored from being put into the _shards header? 
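+ // (presumably a version conflict on a replica does not lose the write, so it is not recorded as a shard failure)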
- if (e != null && !ignoreReplicaException(e)) { + if (e != null && ignoreReplicaException(e) == false) { shardReplicaFailures.put(nodeId, e); } - finishIfNeeded(); + decPendingAndFinishIfNeeded(); } - public void onReplicaSuccess() { + void onReplicaSuccess() { success.incrementAndGet(); - finishIfNeeded(); + decPendingAndFinishIfNeeded(); } - public void forceFinish() { - doFinish(); - } - - private void finishIfNeeded() { - if (pending.decrementAndGet() == 0) { + private void decPendingAndFinishIfNeeded() { + if (pending.decrementAndGet() <= 0) { doFinish(); } } + private void forceFinishAsFailed(Throwable t) { + if (finished.compareAndSet(false, true)) { + listener.onFailure(t); + } + } + private void doFinish() { if (finished.compareAndSet(false, true)) { + final ShardId shardId = shardIt.shardId(); final ActionWriteResponse.ShardInfo.Failure[] failuresArray; if (!shardReplicaFailures.isEmpty()) { int slot = 0; failuresArray = new ActionWriteResponse.ShardInfo.Failure[shardReplicaFailures.size()]; for (Map.Entry entry : shardReplicaFailures.entrySet()) { - String reason = ExceptionsHelper.detailedMessage(entry.getValue()); RestStatus restStatus = ExceptionsHelper.status(entry.getValue()); failuresArray[slot++] = new ActionWriteResponse.ShardInfo.Failure( - shardId.getIndex(), shardId.getId(), entry.getKey(), reason, restStatus, false + shardId.getIndex(), shardId.getId(), entry.getKey(), entry.getValue(), restStatus, false ); } } else { failuresArray = ActionWriteResponse.EMPTY; } - finalResponse.setShardInfo( - new ActionWriteResponse.ShardInfo( - numberOfShardInstances, + finalResponse.setShardInfo(new ActionWriteResponse.ShardInfo( + totalShards, success.get(), failuresArray diff --git a/src/main/java/org/elasticsearch/action/support/single/custom/SingleCustomOperationRequest.java b/src/main/java/org/elasticsearch/action/support/single/custom/SingleCustomOperationRequest.java index 19b72fa5b87..e1fc74dcb28 100644 --- a/src/main/java/org/elasticsearch/action/support/single/custom/SingleCustomOperationRequest.java +++ b/src/main/java/org/elasticsearch/action/support/single/custom/SingleCustomOperationRequest.java @@ -25,6 +25,7 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -33,6 +34,8 @@ import java.io.IOException; */ public abstract class SingleCustomOperationRequest extends ActionRequest implements IndicesRequest { + ShardId internalShardId; + private boolean threadedOperation = true; private boolean preferLocal = true; private String index; @@ -113,6 +116,9 @@ public abstract class SingleCustomOperationRequest extends TransportAction { +public abstract class TransportSingleCustomOperationAction extends HandledTransportAction { protected final ClusterService clusterService; - protected final TransportService transportService; final String transportShardAction; final String executor; - protected TransportSingleCustomOperationAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters) { - super(settings, actionName, threadPool, actionFilters); + protected TransportSingleCustomOperationAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, 
+ Class request, String executor) { + super(settings, actionName, threadPool, transportService, actionFilters, request); this.clusterService = clusterService; this.transportService = transportService; this.transportShardAction = actionName + "[s]"; - this.executor = executor(); + this.executor = executor; - transportService.registerHandler(transportShardAction, new ShardTransportHandler()); + transportService.registerRequestHandler(transportShardAction, request, executor, new ShardTransportHandler()); } @Override @@ -73,8 +66,6 @@ public abstract class TransportSingleCustomOperationAction() { + internalRequest.request().internalShardId = shard.shardId(); + transportService.sendRequest(node, transportShardAction, internalRequest.request(), new BaseTransportResponseHandler() { @Override public Response newInstance() { return newResponse(); @@ -290,73 +280,15 @@ public abstract class TransportSingleCustomOperationAction { + private class ShardTransportHandler implements TransportRequestHandler { @Override - public ShardSingleOperationRequest newInstance() { - return new ShardSingleOperationRequest(); - } - - @Override - public String executor() { - return executor; - } - - @Override - public void messageReceived(final ShardSingleOperationRequest request, final TransportChannel channel) throws Exception { - Response response = shardOperation(request.request(), request.shardId()); + public void messageReceived(final Request request, final TransportChannel channel) throws Exception { + Response response = shardOperation(request, request.internalShardId); channel.sendResponse(response); } } - protected class ShardSingleOperationRequest extends TransportRequest implements IndicesRequest { - - private Request request; - private ShardId shardId; - - ShardSingleOperationRequest() { - } - - public ShardSingleOperationRequest(Request request, ShardId shardId) { - super(request); - this.request = request; - this.shardId = shardId; - } - - public Request request() { - return request; - } - - @Override - public String[] indices() { - return request.indices(); - } - - @Override - public IndicesOptions indicesOptions() { - return request.indicesOptions(); - } - - public ShardId shardId() { - return shardId; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - request = newRequest(); - request.readFrom(in); - shardId = ShardId.readShardId(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - request.writeTo(out); - shardId.writeTo(out); - } - } - /** * Internal request class that gets built on each node. Holds the original request plus additional info. 
*/ diff --git a/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java b/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java index 3e2ec365c37..d4190aca619 100644 --- a/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java +++ b/src/main/java/org/elasticsearch/action/support/single/instance/TransportInstanceSingleOperationAction.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.UnavailableShardsException; import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.HandledTransportAction; import org.elasticsearch.action.support.TransportAction; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; @@ -47,22 +48,18 @@ import java.util.concurrent.atomic.AtomicBoolean; /** * */ -public abstract class TransportInstanceSingleOperationAction extends TransportAction { +public abstract class TransportInstanceSingleOperationAction extends HandledTransportAction { protected final ClusterService clusterService; - protected final TransportService transportService; final String executor; - protected TransportInstanceSingleOperationAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters) { - super(settings, actionName, threadPool, actionFilters); + protected TransportInstanceSingleOperationAction(Settings settings, String actionName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, Class request) { + super(settings, actionName, threadPool, transportService, actionFilters, request); this.clusterService = clusterService; this.transportService = transportService; - this.executor = executor(); - - transportService.registerHandler(actionName, new TransportHandler()); } @Override @@ -72,9 +69,7 @@ public abstract class TransportInstanceSingleOperationAction listener) throws ElasticsearchException; - - protected abstract Request newRequest(); + protected abstract void shardOperation(InternalRequest request, ActionListener listener); protected abstract Response newResponse(); @@ -102,7 +97,7 @@ public abstract class TransportInstanceSingleOperationAction { - - @Override - public Request newInstance() { - return newRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - - @Override - public void messageReceived(Request request, final TransportChannel channel) throws Exception { - // no need to have a threaded listener since we just send back a response - request.listenerThreaded(false); - execute(request, new ActionListener() { - @Override - public void onResponse(Response result) { - try { - channel.sendResponse(result); - } catch (Throwable e) { - onFailure(e); - } - } - - @Override - public void onFailure(Throwable e) { - try { - channel.sendResponse(e); - } catch (Exception e1) { - logger.warn("Failed to send response for get", e1); - } - } - }); - } - } - /** * Internal request class that gets built on each node. Holds the original request plus additional info. 
*/ diff --git a/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequest.java b/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequest.java index 8a0263fd0d5..74db0435709 100644 --- a/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequest.java +++ b/src/main/java/org/elasticsearch/action/support/single/shard/SingleShardOperationRequest.java @@ -26,6 +26,7 @@ import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.index.shard.ShardId; import java.io.IOException; @@ -34,10 +35,10 @@ import java.io.IOException; */ public abstract class SingleShardOperationRequest extends ActionRequest implements IndicesRequest { + ShardId internalShardId; + protected String index; - public static final IndicesOptions INDICES_OPTIONS = IndicesOptions.strictSingleIndexNoExpandForbidClosed(); - private boolean threadedOperation = true; protected SingleShardOperationRequest() { @@ -107,6 +108,9 @@ public abstract class SingleShardOperationRequest request, String executor) { super(settings, actionName, threadPool, actionFilters); this.clusterService = clusterService; this.transportService = transportService; this.transportShardAction = actionName + "[s]"; - this.executor = executor(); + this.executor = executor; if (!isSubAction()) { - transportService.registerHandler(actionName, new TransportHandler()); + transportService.registerRequestHandler(actionName, request, ThreadPool.Names.SAME, new TransportHandler()); } - transportService.registerHandler(transportShardAction, new ShardTransportHandler()); + transportService.registerRequestHandler(transportShardAction, request, executor, new ShardTransportHandler()); } /** @@ -89,11 +83,7 @@ public abstract class TransportShardSingleOperationAction() { + internalRequest.request().internalShardId = shardRouting.shardId(); + transportService.sendRequest(node, transportShardAction, internalRequest.request(), new BaseTransportResponseHandler() { @Override public Response newInstance() { @@ -237,17 +228,7 @@ public abstract class TransportShardSingleOperationAction { - - @Override - public Request newInstance() { - return newRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } + private class TransportHandler implements TransportRequestHandler { @Override public void messageReceived(Request request, final TransportChannel channel) throws Exception { @@ -277,77 +258,18 @@ public abstract class TransportShardSingleOperationAction { + private class ShardTransportHandler implements TransportRequestHandler { @Override - public ShardSingleOperationRequest newInstance() { - return new ShardSingleOperationRequest(); - } - - @Override - public String executor() { - return executor; - } - - @Override - public void messageReceived(final ShardSingleOperationRequest request, final TransportChannel channel) throws Exception { + public void messageReceived(final Request request, final TransportChannel channel) throws Exception { if (logger.isTraceEnabled()) { - logger.trace("executing [{}] on shard [{}]", request.request(), request.shardId()); + logger.trace("executing [{}] on shard [{}]", request, request.internalShardId); } - Response response = shardOperation(request.request(), request.shardId()); + Response response = shardOperation(request, request.internalShardId); 
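+ // note: the target shard id now travels on the request itself (internalShardId) rather than on the removed ShardSingleOperationRequest wrapper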
channel.sendResponse(response); } } - class ShardSingleOperationRequest extends TransportRequest implements IndicesRequest { - - private Request request; - - private ShardId shardId; - - ShardSingleOperationRequest() { - } - - ShardSingleOperationRequest(Request request, ShardId shardId) { - super(request); - this.request = request; - this.shardId = shardId; - } - - public Request request() { - return request; - } - - public ShardId shardId() { - return shardId; - } - - @Override - public String[] indices() { - return request.indices(); - } - - @Override - public IndicesOptions indicesOptions() { - return request.indicesOptions(); - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - request = newRequest(); - request.readFrom(in); - shardId = ShardId.readShardId(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - request.writeTo(out); - shardId.writeTo(out); - } - } - /** * Internal request class that gets built on each node. Holds the original request plus additional info. */ diff --git a/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java b/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java index a413edfa232..3cde0f41ce5 100644 --- a/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java +++ b/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsRequest.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.termvectors; import com.google.common.collect.Iterators; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.*; import org.elasticsearch.common.Nullable; @@ -98,7 +97,7 @@ public class MultiTermVectorsRequest extends ActionRequest listener) throws ElasticsearchException { + protected void shardOperation(final InternalRequest request, final ActionListener listener) { shardOperation(request, listener, 0); } - protected void shardOperation(final InternalRequest request, final ActionListener listener, final int retryCount) throws ElasticsearchException { + protected void shardOperation(final InternalRequest request, final ActionListener listener, final int retryCount) { IndexService indexService = indicesService.indexServiceSafe(request.concreteIndex()); IndexShard indexShard = indexService.shardSafe(request.request().shardId()); final UpdateHelper.Result result = updateHelper.prepare(request.request(), indexShard); @@ -280,7 +274,7 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio listener.onResponse(update); break; default: - throw new ElasticsearchIllegalStateException("Illegal operation " + result.operation()); + throw new IllegalStateException("Illegal operation " + result.operation()); } } } diff --git a/src/main/java/org/elasticsearch/action/update/UpdateHelper.java b/src/main/java/org/elasticsearch/action/update/UpdateHelper.java index 4e4a2864f90..ea90ac33f0b 100644 --- a/src/main/java/org/elasticsearch/action/update/UpdateHelper.java +++ b/src/main/java/org/elasticsearch/action/update/UpdateHelper.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.update; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.client.Requests; @@ -46,6 +45,7 @@ import org.elasticsearch.index.shard.IndexShard; import 
org.elasticsearch.index.shard.ShardId; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.lookup.SourceLookup; @@ -94,13 +94,13 @@ public class UpdateHelper extends AbstractComponent { ctx.put("op", "create"); ctx.put("_source", upsertDoc); try { - ExecutableScript script = scriptService.executable(request.scriptLang, request.script, request.scriptType, ScriptContext.Standard.UPDATE, request.scriptParams); + ExecutableScript script = scriptService.executable(new Script(request.scriptLang, request.script, request.scriptType, request.scriptParams), ScriptContext.Standard.UPDATE); script.setNextVar("ctx", ctx); script.run(); // we need to unwrap the ctx... ctx = (Map) script.unwrap(ctx); } catch (Exception e) { - throw new ElasticsearchIllegalArgumentException("failed to execute script", e); + throw new IllegalArgumentException("failed to execute script", e); } //Allow the script to set TTL using ctx._ttl ttl = getTTLFromScriptContext(ctx); @@ -193,13 +193,13 @@ public class UpdateHelper extends AbstractComponent { ctx.put("_source", sourceAndContent.v2()); try { - ExecutableScript script = scriptService.executable(request.scriptLang, request.script, request.scriptType, ScriptContext.Standard.UPDATE, request.scriptParams); + ExecutableScript script = scriptService.executable(new Script(request.scriptLang, request.script, request.scriptType, request.scriptParams), ScriptContext.Standard.UPDATE); script.setNextVar("ctx", ctx); script.run(); // we need to unwrap the ctx... ctx = (Map) script.unwrap(ctx); } catch (Exception e) { - throw new ElasticsearchIllegalArgumentException("failed to execute script", e); + throw new IllegalArgumentException("failed to execute script", e); } operation = (String) ctx.get("op"); diff --git a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java index 9020d115a8b..d351a9d4ea2 100644 --- a/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java +++ b/src/main/java/org/elasticsearch/bootstrap/Bootstrap.java @@ -26,7 +26,6 @@ import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.CreationException; import org.elasticsearch.common.inject.spi.Message; -import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.jna.Natives; import org.elasticsearch.common.logging.ESLogger; @@ -40,7 +39,6 @@ import org.elasticsearch.node.Node; import org.elasticsearch.node.NodeBuilder; import org.elasticsearch.node.internal.InternalSettingsPreparer; -import java.nio.file.Paths; import java.util.Locale; import java.util.Set; import java.util.concurrent.CountDownLatch; @@ -60,12 +58,12 @@ public class Bootstrap { private static volatile CountDownLatch keepAliveLatch; private static Bootstrap bootstrap; - private void setup(boolean addShutdownHook, Tuple tuple) throws Exception { - if (tuple.v1().getAsBoolean("bootstrap.mlockall", false)) { + private void setup(boolean addShutdownHook, Settings settings, Environment environment) throws Exception { + if (settings.getAsBoolean("bootstrap.mlockall", false)) { Natives.tryMlockall(); } - NodeBuilder nodeBuilder = NodeBuilder.nodeBuilder().settings(tuple.v1()).loadConfigSettings(false); + NodeBuilder 
nodeBuilder = NodeBuilder.nodeBuilder().settings(settings).loadConfigSettings(false); node = nodeBuilder.build(); if (addShutdownHook) { Runtime.getRuntime().addShutdownHook(new Thread() { @@ -76,7 +74,7 @@ public class Bootstrap { }); } - if (tuple.v1().getAsBoolean("bootstrap.ctrlhandler", true)) { + if (settings.getAsBoolean("bootstrap.ctrlhandler", true)) { Natives.addConsoleCtrlHandler(new ConsoleCtrlHandler() { @Override public boolean handle(int code) { @@ -91,13 +89,27 @@ public class Bootstrap { } }); } + // install SM after natives, JNA can require strange permissions + setupSecurity(settings, environment); + } + + /** + * option for elasticsearch.yml etc to turn off our security manager completely, + * for example if you want to have your own configuration or just disable. + */ + static final String SECURITY_SETTING = "security.manager.enabled"; + + private void setupSecurity(Settings settings, Environment environment) throws Exception { + if (settings.getAsBoolean(SECURITY_SETTING, true)) { + Security.configure(environment); + } } @SuppressForbidden(reason = "Exception#printStackTrace()") - private static void setupLogging(Tuple tuple) { + private static void setupLogging(Settings settings, Environment environment) { try { - tuple.v1().getClassLoader().loadClass("org.apache.log4j.Logger"); - LogConfigurator.configure(tuple.v1()); + settings.getClassLoader().loadClass("org.apache.log4j.Logger"); + LogConfigurator.configure(settings); } catch (ClassNotFoundException e) { // no log4j } catch (NoClassDefFoundError e) { @@ -117,8 +129,10 @@ public class Bootstrap { */ public void init(String[] args) throws Exception { Tuple tuple = initialSettings(); - setupLogging(tuple); - setup(true, tuple); + Settings settings = tuple.v1(); + Environment environment = tuple.v2(); + setupLogging(settings, environment); + setup(true, settings, environment); } /** @@ -168,10 +182,13 @@ public class Bootstrap { foreground = false; } - Tuple tuple = null; + Settings settings = null; + Environment environment = null; try { - tuple = initialSettings(); - setupLogging(tuple); + Tuple tuple = initialSettings(); + settings = tuple.v1(); + environment = tuple.v2(); + setupLogging(settings, environment); } catch (Exception e) { String errorMessage = buildErrorMessage("Setup", e); sysError(errorMessage, true); @@ -199,7 +216,7 @@ public class Bootstrap { // fail if using broken version JVMCheck.check(); - bootstrap.setup(true, tuple); + bootstrap.setup(true, settings, environment); stage = "Startup"; bootstrap.start(); diff --git a/src/main/java/org/elasticsearch/bootstrap/Security.java b/src/main/java/org/elasticsearch/bootstrap/Security.java new file mode 100644 index 00000000000..67ac531f0e7 --- /dev/null +++ b/src/main/java/org/elasticsearch/bootstrap/Security.java @@ -0,0 +1,114 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.bootstrap; + +import com.google.common.io.ByteStreams; +import org.apache.lucene.util.IOUtils; +import org.apache.lucene.util.StringHelper; +import org.elasticsearch.env.Environment; + +import java.io.*; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; + +/** + * Initializes the SecurityManager with necessary permissions. + *
+ * We use a template file (the one we test with), and add additional + * permissions based on the environment (data paths, etc.) + */ +class Security { + + /** template policy file, the one used in tests */ + static final String POLICY_RESOURCE = "security.policy"; + + /** + * Initializes the SecurityManager for the environment. + * Can only happen once! + */ + static void configure(Environment environment) throws IOException { + // init lucene random seed. it will use /dev/urandom where available. + StringHelper.randomId(); + InputStream config = Security.class.getResourceAsStream(POLICY_RESOURCE); + if (config == null) { + throw new NoSuchFileException(POLICY_RESOURCE); + } + Path newConfig = processTemplate(config, environment); + System.setProperty("java.security.policy", newConfig.toString()); + System.setSecurityManager(new SecurityManager()); + IOUtils.deleteFilesIgnoringExceptions(newConfig); // TODO: maybe log something if it fails? + } + + // package-private for testing + static Path processTemplate(InputStream template, Environment environment) throws IOException { + Path processed = Files.createTempFile(null, null); + try (OutputStream output = new BufferedOutputStream(Files.newOutputStream(processed))) { + // copy the template as-is. + try (InputStream in = new BufferedInputStream(template)) { + ByteStreams.copy(in, output); + } + + // all policy files are UTF-8: + // https://docs.oracle.com/javase/7/docs/technotes/guides/security/PolicyFiles.html + try (Writer writer = new OutputStreamWriter(output, StandardCharsets.UTF_8)) { + writer.write(System.lineSeparator()); + writer.write("grant {"); + writer.write(System.lineSeparator()); + + // add permissions for all configured paths. + // TODO: improve test infra so we can reduce permissions where read/write + // is not really needed... + addPath(writer, environment.homeFile(), "read,readlink,write,delete"); + addPath(writer, environment.configFile(), "read,readlink,write,delete"); + addPath(writer, environment.logsFile(), "read,readlink,write,delete"); + addPath(writer, environment.pluginsFile(), "read,readlink,write,delete"); + for (Path path : environment.dataFiles()) { + addPath(writer, path, "read,readlink,write,delete"); + } + for (Path path : environment.dataWithClusterFiles()) { + addPath(writer, path, "read,readlink,write,delete"); + } + + writer.write("};"); + writer.write(System.lineSeparator()); + } + } + return processed; + } + + static void addPath(Writer writer, Path path, String permissions) throws IOException { + // paths may not exist yet + Files.createDirectories(path); + // add each path twice: once for itself, again for files underneath it + writer.write("permission java.io.FilePermission \"" + encode(path) + "\", \"" + permissions + "\";"); + writer.write(System.lineSeparator()); + writer.write("permission java.io.FilePermission \"" + encode(path) + "${/}-\", \"" + permissions + "\";"); + writer.write(System.lineSeparator()); + } + + // Any backslashes in paths must be escaped, because it is the escape character when parsing. + // See "Note Regarding File Path Specifications on Windows Systems". 
+ // https://docs.oracle.com/javase/7/docs/technotes/guides/security/PolicyFiles.html + static String encode(Path path) { + return path.toString().replace("\\", "\\\\"); + } +} diff --git a/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java b/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java index 96f6a098ab8..44c6dc3a4ab 100644 --- a/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java +++ b/src/main/java/org/elasticsearch/cache/recycler/PageCacheRecycler.java @@ -20,7 +20,6 @@ package org.elasticsearch.cache.recycler; import com.google.common.base.Strings; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.recycler.AbstractRecyclerC; @@ -213,7 +212,7 @@ public class PageCacheRecycler extends AbstractComponent { try { return Type.valueOf(type.toUpperCase(Locale.ROOT)); } catch (IllegalArgumentException e) { - throw new ElasticsearchIllegalArgumentException("no type support [" + type + "]"); + throw new IllegalArgumentException("no type support [" + type + "]"); } } diff --git a/src/main/java/org/elasticsearch/client/Client.java b/src/main/java/org/elasticsearch/client/Client.java index 55b8a83428a..973ebf511c3 100644 --- a/src/main/java/org/elasticsearch/client/Client.java +++ b/src/main/java/org/elasticsearch/client/Client.java @@ -29,15 +29,15 @@ import org.elasticsearch.action.count.CountResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteRequestBuilder; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder; -import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; import org.elasticsearch.action.exists.ExistsRequest; import org.elasticsearch.action.exists.ExistsRequestBuilder; import org.elasticsearch.action.exists.ExistsResponse; import org.elasticsearch.action.explain.ExplainRequest; import org.elasticsearch.action.explain.ExplainRequestBuilder; import org.elasticsearch.action.explain.ExplainResponse; +import org.elasticsearch.action.fieldstats.FieldStatsRequest; +import org.elasticsearch.action.fieldstats.FieldStatsRequestBuilder; +import org.elasticsearch.action.fieldstats.FieldStatsResponse; import org.elasticsearch.action.get.*; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.index.IndexRequestBuilder; @@ -220,29 +220,6 @@ public interface Client extends ElasticsearchClient, Releasable { */ BulkRequestBuilder prepareBulk(); - /** - * Deletes all documents from one or more indices based on a query. - * - * @param request The delete by query request - * @return The result future - * @see Requests#deleteByQueryRequest(String...) - */ - ActionFuture deleteByQuery(DeleteByQueryRequest request); - - /** - * Deletes all documents from one or more indices based on a query. - * - * @param request The delete by query request - * @param listener A listener to be notified with a result - * @see Requests#deleteByQueryRequest(String...) - */ - void deleteByQuery(DeleteByQueryRequest request, ActionListener listener); - - /** - * Deletes all documents from one or more indices based on a query. - */ - DeleteByQueryRequestBuilder prepareDeleteByQuery(String... 
indices); - /** * Gets the document that was indexed from an index with a type and id. * @@ -682,6 +659,12 @@ public interface Client extends ElasticsearchClient, Releasable { */ void clearScroll(ClearScrollRequest request, ActionListener listener); + FieldStatsRequestBuilder prepareFieldStats(); + + ActionFuture fieldStats(FieldStatsRequest request); + + void fieldStats(FieldStatsRequest request, ActionListener listener); + /** * Returns this clients settings */ diff --git a/src/main/java/org/elasticsearch/client/ClusterAdminClient.java b/src/main/java/org/elasticsearch/client/ClusterAdminClient.java index 87ade248cd4..0169151fc93 100644 --- a/src/main/java/org/elasticsearch/client/ClusterAdminClient.java +++ b/src/main/java/org/elasticsearch/client/ClusterAdminClient.java @@ -29,9 +29,6 @@ import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsRes import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequest; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequestBuilder; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -248,29 +245,6 @@ public interface ClusterAdminClient extends ElasticsearchClient nodesShutdown(NodesShutdownRequest request); - - /** - * Shutdown nodes in the cluster. - * - * @param request The nodes shutdown request - * @param listener A listener to be notified with a result - * @see org.elasticsearch.client.Requests#nodesShutdownRequest(String...) - */ - void nodesShutdown(NodesShutdownRequest request, ActionListener listener); - - /** - * Shutdown nodes in the cluster. - */ - NodesShutdownRequestBuilder prepareNodesShutdown(String... nodesIds); - /** * Returns list of shards the given search would be executed on. 
*/ diff --git a/src/main/java/org/elasticsearch/client/Requests.java b/src/main/java/org/elasticsearch/client/Requests.java index d8717ffb095..13fce3326fe 100644 --- a/src/main/java/org/elasticsearch/client/Requests.java +++ b/src/main/java/org/elasticsearch/client/Requests.java @@ -21,7 +21,6 @@ package org.elasticsearch.client; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; @@ -53,7 +52,6 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest import org.elasticsearch.action.bulk.BulkRequest; import org.elasticsearch.action.count.CountRequest; import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; import org.elasticsearch.action.exists.ExistsRequest; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.index.IndexRequest; @@ -113,18 +111,6 @@ public class Requests { return new BulkRequest(); } - /** - * Creates a delete by query request. Note, the query itself must be set either by setting the JSON source - * of the query, or by using a {@link org.elasticsearch.index.query.QueryBuilder} (using {@link org.elasticsearch.index.query.QueryBuilders}). - * - * @param indices The indices the delete by query against. Use null or _all to execute against all indices - * @return The delete by query request - * @see org.elasticsearch.client.Client#deleteByQuery(org.elasticsearch.action.deletebyquery.DeleteByQueryRequest) - */ - public static DeleteByQueryRequest deleteByQueryRequest(String... indices) { - return new DeleteByQueryRequest(indices); - } - /** * Creates a get request to get the JSON source from an index based on a type and id. Note, the * {@link GetRequest#type(String)} and {@link GetRequest#id(String)} must be set. @@ -423,24 +409,6 @@ public class Requests { return new ClusterStatsRequest(); } - /** - * Shuts down all nodes in the cluster. - */ - public static NodesShutdownRequest nodesShutdownRequest() { - return new NodesShutdownRequest(); - } - - /** - * Shuts down the specified nodes in the cluster. - * - * @param nodesIds The nodes ids to get the status for - * @return The nodes info request - * @see org.elasticsearch.client.ClusterAdminClient#nodesShutdown(org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequest) - */ - public static NodesShutdownRequest nodesShutdownRequest(String... 
nodesIds) { - return new NodesShutdownRequest(nodesIds); - } - /** * Registers snapshot repository * diff --git a/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/src/main/java/org/elasticsearch/client/support/AbstractClient.java index 55f74434b0f..7bfdfa14198 100644 --- a/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -32,10 +32,6 @@ import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteRequestBuilder; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.deletebyquery.DeleteByQueryAction; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder; -import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; import org.elasticsearch.action.exists.ExistsAction; import org.elasticsearch.action.exists.ExistsRequest; import org.elasticsearch.action.exists.ExistsRequestBuilder; @@ -44,6 +40,10 @@ import org.elasticsearch.action.explain.ExplainAction; import org.elasticsearch.action.explain.ExplainRequest; import org.elasticsearch.action.explain.ExplainRequestBuilder; import org.elasticsearch.action.explain.ExplainResponse; +import org.elasticsearch.action.fieldstats.FieldStatsAction; +import org.elasticsearch.action.fieldstats.FieldStatsRequest; +import org.elasticsearch.action.fieldstats.FieldStatsRequestBuilder; +import org.elasticsearch.action.fieldstats.FieldStatsResponse; import org.elasticsearch.action.get.*; import org.elasticsearch.action.index.IndexAction; import org.elasticsearch.action.index.IndexRequest; @@ -168,21 +168,6 @@ public abstract class AbstractClient implements Client { return new BulkRequestBuilder(this); } - @Override - public ActionFuture<DeleteByQueryResponse> deleteByQuery(final DeleteByQueryRequest request) { - return execute(DeleteByQueryAction.INSTANCE, request); - } - - @Override - public void deleteByQuery(final DeleteByQueryRequest request, final ActionListener<DeleteByQueryResponse> listener) { - execute(DeleteByQueryAction.INSTANCE, request, listener); - } - - @Override - public DeleteByQueryRequestBuilder prepareDeleteByQuery(String... 
indices) { - return new DeleteByQueryRequestBuilder(this).setIndices(indices); - } - @Override public ActionFuture<GetResponse> get(final GetRequest request) { return execute(GetAction.INSTANCE, request); @@ -550,4 +535,19 @@ public abstract class AbstractClient implements Client { public ClearScrollRequestBuilder prepareClearScroll() { return new ClearScrollRequestBuilder(this); } + + @Override + public void fieldStats(FieldStatsRequest request, ActionListener<FieldStatsResponse> listener) { + execute(FieldStatsAction.INSTANCE, request, listener); + } + + @Override + public ActionFuture<FieldStatsResponse> fieldStats(FieldStatsRequest request) { + return execute(FieldStatsAction.INSTANCE, request); + } + + @Override + public FieldStatsRequestBuilder prepareFieldStats() { + return new FieldStatsRequestBuilder(this); + } } diff --git a/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java b/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java index 1ff8b42d7d4..f4a6e58e49a 100644 --- a/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java +++ b/src/main/java/org/elasticsearch/client/support/AbstractClusterAdminClient.java @@ -33,10 +33,6 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoAction; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequest; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoRequestBuilder; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownAction; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequest; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequestBuilder; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequest; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsRequestBuilder; @@ -233,21 +229,6 @@ public abstract class AbstractClusterAdminClient implements ClusterAdminClient { return new NodesHotThreadsRequestBuilder(this).setNodesIds(nodesIds); } - @Override - public ActionFuture<NodesShutdownResponse> nodesShutdown(final NodesShutdownRequest request) { - return execute(NodesShutdownAction.INSTANCE, request); - } - - @Override - public void nodesShutdown(final NodesShutdownRequest request, final ActionListener<NodesShutdownResponse> listener) { - execute(NodesShutdownAction.INSTANCE, request, listener); - } - - @Override - public NodesShutdownRequestBuilder prepareNodesShutdown(String... 
nodesIds) { - return new NodesShutdownRequestBuilder(this).setNodesIds(nodesIds); - } - @Override public ActionFuture<ClusterSearchShardsResponse> searchShards(final ClusterSearchShardsRequest request) { return execute(ClusterSearchShardsAction.INSTANCE, request); diff --git a/src/main/java/org/elasticsearch/client/transport/TransportClient.java b/src/main/java/org/elasticsearch/client/transport/TransportClient.java index 2e4b995bf46..6dd30b02af3 100644 --- a/src/main/java/org/elasticsearch/client/transport/TransportClient.java +++ b/src/main/java/org/elasticsearch/client/transport/TransportClient.java @@ -29,8 +29,6 @@ import org.elasticsearch.action.count.CountRequest; import org.elasticsearch.action.count.CountResponse; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; -import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; import org.elasticsearch.action.explain.ExplainRequest; import org.elasticsearch.action.explain.ExplainResponse; import org.elasticsearch.action.get.GetRequest; @@ -109,7 +107,7 @@ public class TransportClient extends AbstractClient { * Constructs a new transport client with settings loaded either from the classpath or the file system (the * elasticsearch.(yml|json) files optionally prefixed with config/). */ - public TransportClient() throws ElasticsearchException { + public TransportClient() { this(ImmutableSettings.Builder.EMPTY_SETTINGS, true); } @@ -138,7 +136,7 @@ public class TransportClient extends AbstractClient { * @param loadConfigSettings true if settings should be loaded from the classpath/file system. * @throws org.elasticsearch.ElasticsearchException */ - public TransportClient(Settings.Builder settings, boolean loadConfigSettings) throws ElasticsearchException { + public TransportClient(Settings.Builder settings, boolean loadConfigSettings) { this(settings.build(), loadConfigSettings); } @@ -151,7 +149,7 @@ public class TransportClient extends AbstractClient { * @param loadConfigSettings true if settings should be loaded from the classpath/file system. 
* @throws org.elasticsearch.ElasticsearchException */ - public TransportClient(Settings pSettings, boolean loadConfigSettings) throws ElasticsearchException { + public TransportClient(Settings pSettings, boolean loadConfigSettings) { Tuple<Settings, Environment> tuple = InternalSettingsPreparer.prepareSettings(pSettings, loadConfigSettings); Settings settings = settingsBuilder() .put(NettyTransport.PING_SCHEDULE, "5s") // enable by default the transport schedule ping interval @@ -352,16 +350,6 @@ public class TransportClient extends AbstractClient { internalClient.bulk(request, listener); } - @Override - public ActionFuture<DeleteByQueryResponse> deleteByQuery(DeleteByQueryRequest request) { - return internalClient.deleteByQuery(request); - } - - @Override - public void deleteByQuery(DeleteByQueryRequest request, ActionListener<DeleteByQueryResponse> listener) { - internalClient.deleteByQuery(request, listener); - } - @Override public ActionFuture<GetResponse> get(GetRequest request) { return internalClient.get(request); diff --git a/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java b/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java index cb237087886..e2cf962f65a 100644 --- a/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java +++ b/src/main/java/org/elasticsearch/client/transport/TransportClientNodesService.java @@ -24,7 +24,6 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; import com.google.common.collect.Sets; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -39,9 +38,11 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.threadpool.ThreadPool; @@ -143,7 +144,7 @@ public class TransportClientNodesService extends AbstractComponent { public TransportClientNodesService addTransportAddresses(TransportAddress... 
transportAddresses) { synchronized (mutex) { if (closed) { - throw new ElasticsearchIllegalStateException("transport client is closed, can't add an address"); + throw new IllegalStateException("transport client is closed, can't add an address"); } List<TransportAddress> filtered = Lists.newArrayListWithExpectedSize(transportAddresses.length); for (TransportAddress transportAddress : transportAddresses) { @@ -178,7 +179,7 @@ public class TransportClientNodesService extends AbstractComponent { public TransportClientNodesService removeTransportAddress(TransportAddress transportAddress) { synchronized (mutex) { if (closed) { - throw new ElasticsearchIllegalStateException("transport client is closed, can't remove an address"); + throw new IllegalStateException("transport client is closed, can't remove an address"); } ImmutableList.Builder<DiscoveryNode> builder = ImmutableList.builder(); for (DiscoveryNode otherNode : listedNodes) { @@ -194,11 +195,11 @@ public class TransportClientNodesService extends AbstractComponent { return this; } - public <Response> void execute(NodeListenerCallback<Response> callback, ActionListener<Response> listener) throws ElasticsearchException { + public <Response> void execute(NodeListenerCallback<Response> callback, ActionListener<Response> listener) { ImmutableList<DiscoveryNode> nodes = this.nodes; ensureNodesAreAvailable(nodes); int index = getNodeNumber(); - RetryListener<Response> retryListener = new RetryListener<>(callback, listener, nodes, index); + RetryListener<Response> retryListener = new RetryListener<>(callback, listener, nodes, index, threadPool, logger); DiscoveryNode node = nodes.get((index) % nodes.size()); try { callback.doWithNode(node, retryListener); @@ -212,15 +213,20 @@ private final NodeListenerCallback<Response> callback; private final ActionListener<Response> listener; private final ImmutableList<DiscoveryNode> nodes; + private final ESLogger logger; private final int index; + private ThreadPool threadPool; private volatile int i; - public RetryListener(NodeListenerCallback<Response> callback, ActionListener<Response> listener, ImmutableList<DiscoveryNode> nodes, int index) { + public RetryListener(NodeListenerCallback<Response> callback, ActionListener<Response> listener, ImmutableList<DiscoveryNode> nodes, + int index, ThreadPool threadPool, ESLogger logger) { this.callback = callback; this.listener = listener; this.nodes = nodes; this.index = index; + this.threadPool = threadPool; + this.logger = logger; } @Override @@ -233,19 +239,38 @@ if (ExceptionsHelper.unwrapCause(e) instanceof ConnectTransportException) { int i = ++this.i; if (i >= nodes.size()) { - listener.onFailure(new NoNodeAvailableException("None of the configured nodes were available: " + nodes, e)); + runFailureInListenerThreadPool(new NoNodeAvailableException("None of the configured nodes were available: " + nodes, e)); } else { try { callback.doWithNode(nodes.get((index + i) % nodes.size()), this); - } catch(Throwable t) { - //this exception can't come from the TransportService as it doesn't throw exceptions at all - listener.onFailure(t); + } catch(final Throwable t) { + // this exception can't come from the TransportService as it doesn't throw exceptions at all + runFailureInListenerThreadPool(t); } } } else { - listener.onFailure(e); + runFailureInListenerThreadPool(e); } } + + // need to ensure to not block the netty I/O thread, in case of retry due to the node sampling + private void runFailureInListenerThreadPool(final Throwable t) { + threadPool.executor(ThreadPool.Names.LISTENER).execute(new AbstractRunnable() { + @Override + protected void doRun() 
throws Exception { + listener.onFailure(t); + } + + @Override + public void onFailure(Throwable t) { + if (logger.isDebugEnabled()) { + logger.debug("Could not execute failure listener: [{}]", t, t.getMessage()); + } else { + logger.error("Could not execute failure listener: [{}]", t.getMessage()); + } + } + }); + } } public void close() { diff --git a/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java b/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java new file mode 100644 index 00000000000..4e6da2bd569 --- /dev/null +++ b/src/main/java/org/elasticsearch/cluster/AbstractDiffable.java @@ -0,0 +1,108 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamableReader; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Abstract diffable object with a simple diff implementation that sends the entire object if the object has changed, or + * nothing if the object remained the same. 
+ */ +public abstract class AbstractDiffable<T extends Diffable<T>> implements Diffable<T> { + + @Override + public Diff<T> diff(T previousState) { + if (this.get().equals(previousState)) { + return new CompleteDiff<>(); + } else { + return new CompleteDiff<>(get()); + } + } + + @Override + public Diff<T> readDiffFrom(StreamInput in) throws IOException { + return new CompleteDiff<>(this, in); + } + + public static <T extends Diffable<T>> Diff<T> readDiffFrom(StreamableReader<T> reader, StreamInput in) throws IOException { + return new CompleteDiff<T>(reader, in); + } + + private static class CompleteDiff<T extends Diffable<T>> implements Diff<T> { + + @Nullable + private final T part; + + /** + * Creates simple diff with changes + */ + public CompleteDiff(T part) { + this.part = part; + } + + /** + * Creates simple diff without changes + */ + public CompleteDiff() { + this.part = null; + } + + /** + * Read simple diff from the stream + */ + public CompleteDiff(StreamableReader<T> reader, StreamInput in) throws IOException { + if (in.readBoolean()) { + this.part = reader.readFrom(in); + } else { + this.part = null; + } + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + if (part != null) { + out.writeBoolean(true); + part.writeTo(out); + } else { + out.writeBoolean(false); + } + } + + @Override + public T apply(T part) { + if (this.part != null) { + return this.part; + } else { + return part; + } + } + } + + @SuppressWarnings("unchecked") + public T get() { + return (T) this; + } +} + diff --git a/src/main/java/org/elasticsearch/cluster/ClusterService.java b/src/main/java/org/elasticsearch/cluster/ClusterService.java index f456edd8ddb..4ee9afb3e98 100644 --- a/src/main/java/org/elasticsearch/cluster/ClusterService.java +++ b/src/main/java/org/elasticsearch/cluster/ClusterService.java @@ -19,11 +19,11 @@ package org.elasticsearch.cluster; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.OperationRouting; import org.elasticsearch.cluster.service.PendingClusterTask; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.LifecycleComponent; import org.elasticsearch.common.unit.TimeValue; @@ -49,12 +49,12 @@ public interface ClusterService extends LifecycleComponent<ClusterService> { /** * Adds an initial block to be set on the first cluster state created. */ - void addInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException; + void addInitialStateBlock(ClusterBlock block) throws IllegalStateException; /** * Remove an initial block to be set on the first cluster state created. */ - void removeInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException; + void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException; /** * The operation routing. @@ -95,8 +95,10 @@ public interface ClusterService extends LifecycleComponent<ClusterService> { * Adds a cluster state listener that will timeout after the provided timeout, * and is executed after the clusterstate has been successfully applied ie. is * in state {@link org.elasticsearch.cluster.ClusterState.ClusterStateStatus#APPLIED} + * NOTE: a {@code null} timeout means that the listener will never be removed + * automatically */ - void add(TimeValue timeout, TimeoutClusterStateListener listener); + void add(@Nullable TimeValue timeout, TimeoutClusterStateListener listener); /** * Submits a task that will update the cluster state. 
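The AbstractDiffable base class introduced above is easiest to see through a concrete subclass. The following sketch is illustrative only and not part of the patch: the SamplePart class and its single field are hypothetical, but the overridden methods are exactly what the new contract asks of implementors, since readFrom/writeTo come from Writeable while diff() and readDiffFrom() are inherited:

import java.io.IOException;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

// Hypothetical cluster state part, for illustration only; the real parts are
// classes such as IndexMetaData or ClusterBlocks touched later in this patch.
public class SamplePart extends AbstractDiffable<SamplePart> {

    private final String value;

    public SamplePart(String value) {
        this.value = value;
    }

    // The only serialization code a subclass must supply; the diff machinery
    // (CompleteDiff, diff(), readDiffFrom()) is inherited from AbstractDiffable.
    @Override
    public SamplePart readFrom(StreamInput in) throws IOException {
        return new SamplePart(in.readString());
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(value);
    }

    // AbstractDiffable.diff() is driven by equals(): equal parts serialize as a
    // single boolean, unequal parts as the boolean plus the whole new object.
    @Override
    public boolean equals(Object o) {
        return o instanceof SamplePart && value.equals(((SamplePart) o).value);
    }

    @Override
    public int hashCode() {
        return value.hashCode();
    }
}

With such a part, part.diff(previousPart).apply(previousPart) round-trips to the changed instance, which is the whole contract the map-level helpers in DiffableUtils further down build on.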
diff --git a/src/main/java/org/elasticsearch/cluster/ClusterState.java b/src/main/java/org/elasticsearch/cluster/ClusterState.java index ef4d67740dc..4f63d9e00e3 100644 --- a/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -22,7 +22,7 @@ package org.elasticsearch.cluster; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableSet; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.cluster.DiffableUtils.KeyedReader; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -56,7 +56,9 @@ import java.util.Map; /** * */ -public class ClusterState implements ToXContent { +public class ClusterState implements ToXContent, Diffable<ClusterState> { + + public static final ClusterState PROTO = builder(ClusterName.DEFAULT).build(); public static enum ClusterStateStatus { UNKNOWN((byte) 0), @@ -75,47 +77,43 @@ public class ClusterState implements ToXContent { } } - public interface Custom { + public interface Custom extends Diffable<Custom>, ToXContent { - interface Factory<T extends Custom> { - - String type(); - - T readFrom(StreamInput in) throws IOException; - - void writeTo(T customState, StreamOutput out) throws IOException; - - void toXContent(T customState, XContentBuilder builder, ToXContent.Params params); - } + String type(); } - private final static Map<String, Custom.Factory> customFactories = new HashMap<>(); + private final static Map<String, Custom> customPrototypes = new HashMap<>(); /** * Register a custom index meta data factory. Make sure to call it from a static block. */ - public static void registerFactory(String type, Custom.Factory factory) { - customFactories.put(type, factory); + public static void registerPrototype(String type, Custom proto) { + customPrototypes.put(type, proto); } @Nullable - public static <T extends Custom> Custom.Factory<T> lookupFactory(String type) { - return customFactories.get(type); + public static <T extends Custom> T lookupPrototype(String type) { + //noinspection unchecked + return (T) customPrototypes.get(type); } - public static <T extends Custom> Custom.Factory<T> lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException { - Custom.Factory factory = customFactories.get(type); - if (factory == null) { - throw new ElasticsearchIllegalArgumentException("No custom state factory registered for type [" + type + "]"); + public static <T extends Custom> T lookupPrototypeSafe(String type) { + @SuppressWarnings("unchecked") + T proto = (T)customPrototypes.get(type); + if (proto == null) { + throw new IllegalArgumentException("No custom state prototype registered for type [" + type + "]"); } - return factory; + return proto; } + public static final String UNKNOWN_UUID = "_na_"; public static final long UNKNOWN_VERSION = -1; private final long version; + private final String uuid; + private final RoutingTable routingTable; private final DiscoveryNodes nodes; @@ -128,17 +126,20 @@ public class ClusterState implements ToXContent { private final ClusterName clusterName; + private final boolean wasReadFromDiff; + // built on demand private volatile RoutingNodes routingNodes; private volatile ClusterStateStatus status; - public ClusterState(long version, ClusterState state) { - this(state.clusterName, version, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.customs()); + public ClusterState(long version, String uuid, ClusterState state) { + 
this(state.clusterName, version, uuid, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.customs(), false); } - public ClusterState(ClusterName clusterName, long version, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, ImmutableOpenMap<String, Custom> customs) { + public ClusterState(ClusterName clusterName, long version, String uuid, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, ImmutableOpenMap<String, Custom> customs, boolean wasReadFromDiff) { this.version = version; + this.uuid = uuid; this.clusterName = clusterName; this.metaData = metaData; this.routingTable = routingTable; @@ -146,6 +147,7 @@ public class ClusterState implements ToXContent { this.blocks = blocks; this.customs = customs; this.status = ClusterStateStatus.UNKNOWN; + this.wasReadFromDiff = wasReadFromDiff; } public ClusterStateStatus status() { @@ -165,6 +167,14 @@ public class ClusterState implements ToXContent { return version(); } + /** + * This uuid is automatically generated for each version of cluster state. It is used to make sure that + * we are applying diffs to the right previous state. + */ + public String uuid() { + return this.uuid; + } + public DiscoveryNodes nodes() { return this.nodes; } @@ -217,6 +227,11 @@ public class ClusterState implements ToXContent { return this.clusterName; } + // Used for testing and logging to determine how this cluster state was sent over the wire + boolean wasReadFromDiff() { + return wasReadFromDiff; + } + /** * Returns a built (on demand) routing nodes view of the routing table. NOTE, the routing nodes * are mutable, use them just for read operations @@ -232,6 +247,8 @@ public String prettyPrint() { StringBuilder sb = new StringBuilder(); sb.append("version: ").append(version).append("\n"); + sb.append("uuid: ").append(uuid).append("\n"); + sb.append("from_diff: ").append(wasReadFromDiff).append("\n"); sb.append("meta data version: ").append(metaData.version()).append("\n"); sb.append(nodes().prettyPrint()); sb.append(routingTable().prettyPrint()); @@ -288,7 +305,7 @@ Metric m = valueToEnum.get(metric); if (m == null) { if (!ignoreUnknown) { - throw new ElasticsearchIllegalArgumentException("Unknown metric [" + metric + "]"); + throw new IllegalArgumentException("Unknown metric [" + metric + "]"); } } else { result.add(m); @@ -303,14 +320,13 @@ } } - - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { EnumSet<Metric> metrics = Metric.parseString(params.param("metric", "_all"), true); if (metrics.contains(Metric.VERSION)) { builder.field("version", version); + builder.field("uuid", uuid); } if (metrics.contains(Metric.MASTER_NODE)) { @@ -435,7 +451,7 @@ for (ObjectObjectCursor<String, MetaData.Custom> cursor : metaData.customs()) { builder.startObject(cursor.key); - MetaData.lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params); + cursor.value.toXContent(builder, params); builder.endObject(); } @@ -474,7 +490,7 @@ builder.startObject("nodes"); for (RoutingNode routingNode : readOnlyRoutingNodes()) { - builder.startArray(routingNode.nodeId(), XContentBuilder.FieldCaseConversion.NONE); + builder.startArray(routingNode.nodeId() == null ? 
"null" : routingNode.nodeId(), XContentBuilder.FieldCaseConversion.NONE); for (ShardRouting shardRouting : routingNode) { shardRouting.toXContent(builder, params); } @@ -487,7 +503,7 @@ public class ClusterState implements ToXContent { if (metrics.contains(Metric.CUSTOMS)) { for (ObjectObjectCursor cursor : customs) { builder.startObject(cursor.key); - lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params); + cursor.value.toXContent(builder, params); builder.endObject(); } } @@ -507,21 +523,25 @@ public class ClusterState implements ToXContent { private final ClusterName clusterName; private long version = 0; + private String uuid = UNKNOWN_UUID; private MetaData metaData = MetaData.EMPTY_META_DATA; private RoutingTable routingTable = RoutingTable.EMPTY_ROUTING_TABLE; private DiscoveryNodes nodes = DiscoveryNodes.EMPTY_NODES; private ClusterBlocks blocks = ClusterBlocks.EMPTY_CLUSTER_BLOCK; private final ImmutableOpenMap.Builder customs; + private boolean fromDiff; public Builder(ClusterState state) { this.clusterName = state.clusterName; this.version = state.version(); + this.uuid = state.uuid(); this.nodes = state.nodes(); this.routingTable = state.routingTable(); this.metaData = state.metaData(); this.blocks = state.blocks(); this.customs = ImmutableOpenMap.builder(state.customs()); + this.fromDiff = false; } public Builder(ClusterName clusterName) { @@ -575,6 +595,17 @@ public class ClusterState implements ToXContent { return this; } + public Builder incrementVersion() { + this.version = version + 1; + this.uuid = UNKNOWN_UUID; + return this; + } + + public Builder uuid(String uuid) { + this.uuid = uuid; + return this; + } + public Custom getCustom(String type) { return customs.get(type); } @@ -589,13 +620,26 @@ public class ClusterState implements ToXContent { return this; } + public Builder customs(ImmutableOpenMap customs) { + this.customs.putAll(customs); + return this; + } + + public Builder fromDiff(boolean fromDiff) { + this.fromDiff = fromDiff; + return this; + } + public ClusterState build() { - return new ClusterState(clusterName, version, metaData, routingTable, nodes, blocks, customs.build()); + if (UNKNOWN_UUID.equals(uuid)) { + uuid = Strings.randomBase64UUID(); + } + return new ClusterState(clusterName, version, uuid, metaData, routingTable, nodes, blocks, customs.build(), fromDiff); } public static byte[] toBytes(ClusterState state) throws IOException { BytesStreamOutput os = new BytesStreamOutput(); - writeTo(state, os); + state.writeTo(os); return os.bytes().toBytes(); } @@ -607,39 +651,152 @@ public class ClusterState implements ToXContent { return readFrom(new BytesStreamInput(data), localNode); } - public static void writeTo(ClusterState state, StreamOutput out) throws IOException { - state.clusterName.writeTo(out); - out.writeLong(state.version()); - MetaData.Builder.writeTo(state.metaData(), out); - RoutingTable.Builder.writeTo(state.routingTable(), out); - DiscoveryNodes.Builder.writeTo(state.nodes(), out); - ClusterBlocks.Builder.writeClusterBlocks(state.blocks(), out); - out.writeVInt(state.customs().size()); - for (ObjectObjectCursor cursor : state.customs()) { - out.writeString(cursor.key); - lookupFactorySafe(cursor.key).writeTo(cursor.value, out); - } - } - /** * @param in input stream * @param localNode used to set the local node in the cluster state. can be null. 
*/ public static ClusterState readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException { - ClusterName clusterName = ClusterName.readClusterName(in); + return PROTO.readFrom(in, localNode); + } + + } + + @Override + public Diff<ClusterState> diff(ClusterState previousState) { + return new ClusterStateDiff(previousState, this); + } + + @Override + public Diff<ClusterState> readDiffFrom(StreamInput in) throws IOException { + return new ClusterStateDiff(in, this); + } + + public ClusterState readFrom(StreamInput in, DiscoveryNode localNode) throws IOException { + ClusterName clusterName = ClusterName.readClusterName(in); + Builder builder = new Builder(clusterName); + builder.version = in.readLong(); + builder.uuid = in.readString(); + builder.metaData = MetaData.Builder.readFrom(in); + builder.routingTable = RoutingTable.Builder.readFrom(in); + builder.nodes = DiscoveryNodes.Builder.readFrom(in, localNode); + builder.blocks = ClusterBlocks.Builder.readClusterBlocks(in); + int customSize = in.readVInt(); + for (int i = 0; i < customSize; i++) { + String type = in.readString(); + Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in); + builder.putCustom(type, customIndexMetaData); + } + return builder.build(); + } + + @Override + public ClusterState readFrom(StreamInput in) throws IOException { + return readFrom(in, nodes.localNode()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + clusterName.writeTo(out); + out.writeLong(version); + out.writeString(uuid); + metaData.writeTo(out); + routingTable.writeTo(out); + nodes.writeTo(out); + blocks.writeTo(out); + out.writeVInt(customs.size()); + for (ObjectObjectCursor<String, Custom> cursor : customs) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + } + + private static class ClusterStateDiff implements Diff<ClusterState> { + + private final long toVersion; + + private final String fromUuid; + + private final String toUuid; + + private final ClusterName clusterName; + + private final Diff<RoutingTable> routingTable; + + private final Diff<DiscoveryNodes> nodes; + + private final Diff<MetaData> metaData; + + private final Diff<ClusterBlocks> blocks; + + private final Diff<ImmutableOpenMap<String, Custom>> customs; + + public ClusterStateDiff(ClusterState before, ClusterState after) { + fromUuid = before.uuid; + toUuid = after.uuid; + toVersion = after.version; + clusterName = after.clusterName; + routingTable = after.routingTable.diff(before.routingTable); + nodes = after.nodes.diff(before.nodes); + metaData = after.metaData.diff(before.metaData); + blocks = after.blocks.diff(before.blocks); + customs = DiffableUtils.diff(before.customs, after.customs); + } + + public ClusterStateDiff(StreamInput in, ClusterState proto) throws IOException { + clusterName = ClusterName.readClusterName(in); + fromUuid = in.readString(); + toUuid = in.readString(); + toVersion = in.readLong(); + routingTable = proto.routingTable.readDiffFrom(in); + nodes = proto.nodes.readDiffFrom(in); + metaData = proto.metaData.readDiffFrom(in); + blocks = proto.blocks.readDiffFrom(in); + customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader<Custom>() { + @Override + public Custom readFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readFrom(in); + } + + @Override + public Diff<Custom> readDiffFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readDiffFrom(in); + } + }); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + clusterName.writeTo(out); + out.writeString(fromUuid); + out.writeString(toUuid); + out.writeLong(toVersion); + 
routingTable.writeTo(out); + nodes.writeTo(out); + metaData.writeTo(out); + blocks.writeTo(out); + customs.writeTo(out); + } + + @Override + public ClusterState apply(ClusterState state) { Builder builder = new Builder(clusterName); - builder.version = in.readLong(); - builder.metaData = MetaData.Builder.readFrom(in); - builder.routingTable = RoutingTable.Builder.readFrom(in); - builder.nodes = DiscoveryNodes.Builder.readFrom(in, localNode); - builder.blocks = ClusterBlocks.Builder.readClusterBlocks(in); - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); + if (toUuid.equals(state.uuid)) { + // no need to read the rest - cluster state didn't change + return state; } + if (fromUuid.equals(state.uuid) == false) { + throw new IncompatibleClusterStateVersionException(state.version, state.uuid, toVersion, fromUuid); + } + builder.uuid(toUuid); + builder.version(toVersion); + builder.routingTable(routingTable.apply(state.routingTable)); + builder.nodes(nodes.apply(state.nodes)); + builder.metaData(metaData.apply(state.metaData)); + builder.blocks(blocks.apply(state.blocks)); + builder.customs(customs.apply(state.customs)); + builder.fromDiff(true); return builder.build(); } } + } diff --git a/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java b/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java index a8909636932..28df5aa8017 100644 --- a/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java +++ b/src/main/java/org/elasticsearch/cluster/ClusterStateObserver.java @@ -42,18 +42,18 @@ public class ClusterStateObserver { return changedEvent.previousState().version() != changedEvent.state().version(); } }; - private ClusterService clusterService; + + private final ClusterService clusterService; volatile TimeValue timeOutValue; final AtomicReference<ObservedState> lastObservedState; + final TimeoutClusterStateListener clusterStateListener = new ObserverClusterStateListener(); // observingContext is not null when waiting on cluster state changes final AtomicReference<ObservingContext> observingContext = new AtomicReference<>(null); - volatile long startTime; + volatile Long startTime; - volatile TimeoutClusterStateListener clusterStateListener = new ObserverClusterStateListener(); - public ClusterStateObserver(ClusterService clusterService, ESLogger logger) { this(clusterService, new TimeValue(60000), logger); @@ -65,10 +65,12 @@ public class ClusterStateObserver { * will fail any existing or new #waitForNextChange calls. 
*/ public ClusterStateObserver(ClusterService clusterService, TimeValue timeout, ESLogger logger) { - this.timeOutValue = timeout; this.clusterService = clusterService; this.lastObservedState = new AtomicReference<>(new ObservedState(clusterService.state())); - this.startTime = System.currentTimeMillis(); + this.timeOutValue = timeout; + if (timeOutValue != null) { + this.startTime = System.currentTimeMillis(); + } this.logger = logger; } @@ -108,19 +110,24 @@ public class ClusterStateObserver { if (observingContext.get() != null) { throw new ElasticsearchException("already waiting for a cluster state change"); } - long timeoutTimeLeft; + + Long timeoutTimeLeft; if (timeOutValue == null) { timeOutValue = this.timeOutValue; - long timeSinceStart = System.currentTimeMillis() - startTime; - timeoutTimeLeft = timeOutValue.millis() - timeSinceStart; - if (timeoutTimeLeft <= 0l) { - // things have timeout while we were busy -> notify - logger.debug("observer timed out. notifying listener. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStart)); - // update to latest, in case people want to retry - timedOut = true; - lastObservedState.set(new ObservedState(clusterService.state())); - listener.onTimeout(timeOutValue); - return; + if (timeOutValue != null) { + long timeSinceStart = System.currentTimeMillis() - startTime; + timeoutTimeLeft = timeOutValue.millis() - timeSinceStart; + if (timeoutTimeLeft <= 0l) { + // things have timeout while we were busy -> notify + logger.debug("observer timed out. notifying listener. timeout setting [{}], time since start [{}]", timeOutValue, new TimeValue(timeSinceStart)); + // update to latest, in case people want to retry + timedOut = true; + lastObservedState.set(new ObservedState(clusterService.state())); + listener.onTimeout(timeOutValue); + return; + } + } else { + timeoutTimeLeft = null; } } else { this.startTime = System.currentTimeMillis(); @@ -143,7 +150,7 @@ if (!observingContext.compareAndSet(null, context)) { throw new ElasticsearchException("already waiting for a cluster state change"); } - clusterService.add(new TimeValue(timeoutTimeLeft), clusterStateListener); + clusterService.add(timeoutTimeLeft == null ? null : new TimeValue(timeoutTimeLeft), clusterStateListener); } } diff --git a/src/main/java/org/elasticsearch/transport/BaseTransportRequestHandler.java b/src/main/java/org/elasticsearch/cluster/Diff.java similarity index 62% rename from src/main/java/org/elasticsearch/transport/BaseTransportRequestHandler.java rename to src/main/java/org/elasticsearch/cluster/Diff.java index 98c6f784d3b..2e571f43bca 100644 --- a/src/main/java/org/elasticsearch/transport/BaseTransportRequestHandler.java +++ b/src/main/java/org/elasticsearch/cluster/Diff.java @@ -17,18 +17,26 @@ * under the License. */ -package org.elasticsearch.transport; +package org.elasticsearch.cluster; + +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; /** - * A simple based class that always spawns. + * Represents difference between states of cluster state parts */ -public abstract class BaseTransportRequestHandler<T extends TransportRequest> implements TransportRequestHandler<T> { +public interface Diff<T> { /** - * Default force execution to false. 
+ * Applies the difference to the specified part and returns the resulting part */ - @Override - public boolean isForceExecution() { - return false; - } -} \ No newline at end of file + T apply(T part); + + /** + * Writes the differences into the output stream + * @param out + * @throws IOException + */ + void writeTo(StreamOutput out) throws IOException; +} diff --git a/src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCache.java b/src/main/java/org/elasticsearch/cluster/Diffable.java similarity index 58% rename from src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCache.java rename to src/main/java/org/elasticsearch/cluster/Diffable.java index fee962c733b..7ce60047a2b 100644 --- a/src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCache.java +++ b/src/main/java/org/elasticsearch/cluster/Diffable.java @@ -17,23 +17,26 @@ * under the License. */ -package org.elasticsearch.index.cache.query.parser; +package org.elasticsearch.cluster; -import org.apache.lucene.queryparser.classic.QueryParserSettings; -import org.apache.lucene.search.Query; -import org.elasticsearch.index.IndexComponent; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.io.stream.StreamInput; -import java.io.Closeable; +import java.io.IOException; /** - * The main benefit of the query parser cache is to not parse the same query string on different shards. - * Less about long running query strings. + * Cluster state part, changes in which can be serialized */ -public interface QueryParserCache extends IndexComponent, Closeable { +public interface Diffable<T> extends Writeable<T> { - Query get(QueryParserSettings queryString); + /** + * Returns serializable object representing differences between this and previousState + */ + Diff<T> diff(T previousState); - void put(QueryParserSettings queryString, Query query); + /** + * Reads the {@link org.elasticsearch.cluster.Diff} from StreamInput + */ + Diff<T> readDiffFrom(StreamInput in) throws IOException; - void clear(); } diff --git a/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/src/main/java/org/elasticsearch/cluster/DiffableUtils.java new file mode 100644 index 00000000000..4e912a34f97 --- /dev/null +++ b/src/main/java/org/elasticsearch/cluster/DiffableUtils.java @@ -0,0 +1,283 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static com.google.common.collect.Lists.newArrayList; +import static com.google.common.collect.Maps.newHashMap; + +public final class DiffableUtils { + private DiffableUtils() { + } + + /** + * Calculates diff between two ImmutableOpenMaps of Diffable objects + */ + public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> diff(ImmutableOpenMap<String, T> before, ImmutableOpenMap<String, T> after) { + assert after != null && before != null; + return new ImmutableOpenMapDiff<>(before, after); + } + + /** + * Calculates diff between two ImmutableMaps of Diffable objects + */ + public static <T extends Diffable<T>> Diff<ImmutableMap<String, T>> diff(ImmutableMap<String, T> before, ImmutableMap<String, T> after) { + assert after != null && before != null; + return new ImmutableMapDiff<>(before, after); + } + + /** + * Loads an object that represents difference between two ImmutableOpenMaps + */ + public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> readImmutableOpenMapDiff(StreamInput in, KeyedReader<T> keyedReader) throws IOException { + return new ImmutableOpenMapDiff<>(in, keyedReader); + } + + /** + * Loads an object that represents difference between two ImmutableMaps + */ + public static <T extends Diffable<T>> Diff<ImmutableMap<String, T>> readImmutableMapDiff(StreamInput in, KeyedReader<T> keyedReader) throws IOException { + return new ImmutableMapDiff<>(in, keyedReader); + } + + /** + * Loads an object that represents difference between two ImmutableOpenMaps + */ + public static <T extends Diffable<T>> Diff<ImmutableOpenMap<String, T>> readImmutableOpenMapDiff(StreamInput in, T proto) throws IOException { + return new ImmutableOpenMapDiff<>(in, new PrototypeReader<>(proto)); + } + + /** + * Loads an object that represents difference between two ImmutableMaps + */ + public static <T extends Diffable<T>> Diff<ImmutableMap<String, T>> readImmutableMapDiff(StreamInput in, T proto) throws IOException { + return new ImmutableMapDiff<>(in, new PrototypeReader<>(proto)); + } + + /** + * A reader that can deserialize an object. The reader can select the deserialization type based on the key. It's + * used in custom metadata deserialization. + */ + public interface KeyedReader<T> { + + /** + * reads an object of the type T from the stream input + */ + T readFrom(StreamInput in, String key) throws IOException; + + /** + * reads an object that represents differences between two objects with the type T from the stream input + */ + Diff<T> readDiffFrom(StreamInput in, String key) throws IOException; + } + + /** + * Implementation of the KeyedReader that is using a prototype object for reading operations + * + * Note: this implementation is ignoring the key. 
+ */ + public static class PrototypeReader<T extends Diffable<T>> implements KeyedReader<T> { + private T proto; + + public PrototypeReader(T proto) { + this.proto = proto; + } + + @Override + public T readFrom(StreamInput in, String key) throws IOException { + return proto.readFrom(in); + } + + @Override + public Diff<T> readDiffFrom(StreamInput in, String key) throws IOException { + return proto.readDiffFrom(in); + } + } + + /** + * Represents differences between two ImmutableMaps of diffable objects + * + * @param <T> the diffable object + */ + private static class ImmutableMapDiff<T extends Diffable<T>> extends MapDiff<T, ImmutableMap<String, T>> { + + protected ImmutableMapDiff(StreamInput in, KeyedReader<T> reader) throws IOException { + super(in, reader); + } + + public ImmutableMapDiff(ImmutableMap<String, T> before, ImmutableMap<String, T> after) { + assert after != null && before != null; + for (String key : before.keySet()) { + if (!after.containsKey(key)) { + deletes.add(key); + } + } + for (Map.Entry<String, T> partIter : after.entrySet()) { + T beforePart = before.get(partIter.getKey()); + if (beforePart == null) { + adds.put(partIter.getKey(), partIter.getValue()); + } else if (partIter.getValue().equals(beforePart) == false) { + diffs.put(partIter.getKey(), partIter.getValue().diff(beforePart)); + } + } + } + + @Override + public ImmutableMap<String, T> apply(ImmutableMap<String, T> map) { + HashMap<String, T> builder = newHashMap(); + builder.putAll(map); + + for (String part : deletes) { + builder.remove(part); + } + + for (Map.Entry<String, Diff<T>> diff : diffs.entrySet()) { + builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey()))); + } + + for (Map.Entry<String, T> addition : adds.entrySet()) { + builder.put(addition.getKey(), addition.getValue()); + } + return ImmutableMap.copyOf(builder); + } + } + + /** + * Represents differences between two ImmutableOpenMap of diffable objects + * + * @param <T> the diffable object + */ + private static class ImmutableOpenMapDiff<T extends Diffable<T>> extends MapDiff<T, ImmutableOpenMap<String, T>> { + + protected ImmutableOpenMapDiff(StreamInput in, KeyedReader<T> reader) throws IOException { + super(in, reader); + } + + public ImmutableOpenMapDiff(ImmutableOpenMap<String, T> before, ImmutableOpenMap<String, T> after) { + assert after != null && before != null; + for (ObjectCursor<String> key : before.keys()) { + if (!after.containsKey(key.value)) { + deletes.add(key.value); + } + } + for (ObjectObjectCursor<String, T> partIter : after) { + T beforePart = before.get(partIter.key); + if (beforePart == null) { + adds.put(partIter.key, partIter.value); + } else if (partIter.value.equals(beforePart) == false) { + diffs.put(partIter.key, partIter.value.diff(beforePart)); + } + } + } + + @Override + public ImmutableOpenMap<String, T> apply(ImmutableOpenMap<String, T> map) { + ImmutableOpenMap.Builder<String, T> builder = ImmutableOpenMap.builder(); + builder.putAll(map); + + for (String part : deletes) { + builder.remove(part); + } + + for (Map.Entry<String, Diff<T>> diff : diffs.entrySet()) { + builder.put(diff.getKey(), diff.getValue().apply(builder.get(diff.getKey()))); + } + + for (Map.Entry<String, T> addition : adds.entrySet()) { + builder.put(addition.getKey(), addition.getValue()); + } + return builder.build(); + } + } + + /** + * Represents differences between two maps of diffable objects + * + * This class is used as base class for different map implementations + * + * @param <T> the diffable object + */ + private static abstract class MapDiff<T extends Diffable<T>, M> implements Diff<M> { + + protected final List<String> deletes; + protected final Map<String, Diff<T>> diffs; + protected final Map<String, T> adds; + + protected MapDiff() { + deletes = newArrayList(); + diffs = newHashMap(); + adds = newHashMap(); + } + + protected MapDiff(StreamInput in, KeyedReader<T> reader) throws IOException { 
+ deletes = newArrayList(); + diffs = newHashMap(); + adds = newHashMap(); + int deletesCount = in.readVInt(); + for (int i = 0; i < deletesCount; i++) { + deletes.add(in.readString()); + } + + int diffsCount = in.readVInt(); + for (int i = 0; i < diffsCount; i++) { + String key = in.readString(); + Diff<T> diff = reader.readDiffFrom(in, key); + diffs.put(key, diff); + } + + int addsCount = in.readVInt(); + for (int i = 0; i < addsCount; i++) { + String key = in.readString(); + T part = reader.readFrom(in, key); + adds.put(key, part); + } + } + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(deletes.size()); + for (String delete : deletes) { + out.writeString(delete); + } + + out.writeVInt(diffs.size()); + for (Map.Entry<String, Diff<T>> entry : diffs.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); + } + + out.writeVInt(adds.size()); + for (Map.Entry<String, T> entry : adds.entrySet()) { + out.writeString(entry.getKey()); + entry.getValue().writeTo(out); + } + } + } +} diff --git a/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java b/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java new file mode 100644 index 00000000000..92f5897bf2e --- /dev/null +++ b/src/main/java/org/elasticsearch/cluster/IncompatibleClusterStateVersionException.java @@ -0,0 +1,35 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.cluster; + +import org.elasticsearch.ElasticsearchException; + +/** + * Thrown by {@link Diff#apply(Object)} when a diff cannot be applied to the given cluster state + */ +public class IncompatibleClusterStateVersionException extends ElasticsearchException { + public IncompatibleClusterStateVersionException(String msg) { + super(msg); + } + + public IncompatibleClusterStateVersionException(long expectedVersion, String expectedUuid, long receivedVersion, String receivedUuid) { + super("Expected diff for version " + expectedVersion + " with uuid " + expectedUuid + " got version " + receivedVersion + " and uuid " + receivedUuid); + } +} diff --git a/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java index 6c5e92b3799..2010f67d703 100644 --- a/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java +++ b/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java @@ -19,61 +19,30 @@ package org.elasticsearch.cluster.action.index; -import com.google.common.collect.ImmutableMap; - -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.IndicesRequest; -import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.master.MasterNodeOperationRequest; -import org.elasticsearch.action.support.master.TransportMasterNodeOperationAction; -import org.elasticsearch.cluster.ClusterService; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaDataMappingService; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.compress.CompressedString; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.IndicesAdminClient; +import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.common.util.concurrent.EsExecutors; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.Mapping; import org.elasticsearch.node.settings.NodeSettingsService; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.TransportService; -import java.io.IOException; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; 
import java.util.concurrent.TimeoutException; /** * Called by shards in the cluster when their mapping was dynamically updated and it needs to be updated * in the cluster state meta data (and broadcast to all members). */ -public class MappingUpdatedAction extends TransportMasterNodeOperationAction<MappingUpdatedAction.MappingUpdatedRequest, MappingUpdatedAction.MappingUpdatedResponse> { +public class MappingUpdatedAction extends AbstractComponent { public static final String INDICES_MAPPING_DYNAMIC_TIMEOUT = "indices.mapping.dynamic_timeout"; - public static final String ACTION_NAME = "internal:cluster/mapping_updated"; - - private final MetaDataMappingService metaDataMappingService; - - private volatile MasterMappingUpdater masterMappingUpdater; + private IndicesAdminClient client; private volatile TimeValue dynamicMappingUpdateTimeout; class ApplySettings implements NodeSettingsService.Listener { @@ -89,44 +58,58 @@ public class MappingUpdatedAction extends TransportMasterNodeOperationAction<MappingUpdatedAction.MappingUpdatedRequest, MappingUpdatedAction.MappingUpdatedResponse> - public void updateMappingOnMaster(String index, String indexUUID, String type, Mapping mappingUpdate, final MappingUpdateListener listener) { - try { - XContentBuilder builder = XContentFactory.jsonBuilder().startObject(); - mappingUpdate.toXContent(builder, new ToXContent.MapParams(ImmutableMap.<String, String>of())); - final CompressedString mappingSource = new CompressedString(builder.endObject().bytes()); - masterMappingUpdater.add(new MappingChange(index, indexUUID, type, mappingSource, listener)); - } catch (IOException bogus) { - throw new AssertionError("Cannot happen", bogus); + public PutMappingRequestBuilder updateMappingRequest(String index, String type, Mapping mappingUpdate, final TimeValue timeout) { + return client.preparePutMapping(index).setType(type).setSource(mappingUpdate.toString()) + .setMasterNodeTimeout(timeout).setTimeout(timeout); + } + + public void updateMappingOnMaster(String index, String type, Mapping mappingUpdate, final TimeValue timeout, final MappingUpdateListener listener) { + final PutMappingRequestBuilder request = updateMappingRequest(index, type, mappingUpdate, timeout); + if (listener == null) { + request.execute(); + } else { + final ActionListener<PutMappingResponse> actionListener = new ActionListener<PutMappingResponse>() { + @Override + public void onResponse(PutMappingResponse response) { + if (response.isAcknowledged()) { + listener.onMappingUpdate(); + } else { + listener.onFailure(new TimeoutException("Failed to acknowledge the mapping response within [" + timeout + "]")); + } + } + + @Override + public void onFailure(Throwable e) { + listener.onFailure(e); + } + }; + request.execute(actionListener); } } + public void updateMappingOnMasterAsynchronously(String index, String type, Mapping mappingUpdate) throws Throwable { + updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout, null); + } + /** * Same as {@link #updateMappingOnMasterSynchronously(String, String, Mapping, TimeValue)} * using the default timeout. 
*/ - public void updateMappingOnMasterSynchronously(String index, String indexUUID, String type, Mapping mappingUpdate) throws Throwable { - updateMappingOnMasterSynchronously(index, indexUUID, type, mappingUpdate, dynamicMappingUpdateTimeout); + public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate) throws Throwable { + updateMappingOnMasterSynchronously(index, type, mappingUpdate, dynamicMappingUpdateTimeout); } /** @@ -134,179 +117,9 @@ public class MappingUpdatedAction extends TransportMasterNodeOperationAction<MappingUpdatedAction.MappingUpdatedRequest, MappingUpdatedAction.MappingUpdatedResponse> - protected void masterOperation(final MappingUpdatedRequest request, final ClusterState state, final ActionListener<MappingUpdatedResponse> listener) throws ElasticsearchException { - metaDataMappingService.updateMapping(request.index(), request.indexUUID(), request.type(), request.mappingSource(), request.nodeId, new ActionListener<ClusterStateUpdateResponse>() { - @Override - public void onResponse(ClusterStateUpdateResponse response) { - listener.onResponse(new MappingUpdatedResponse()); - } - - @Override - public void onFailure(Throwable t) { - logger.warn("[{}] update-mapping [{}] failed to dynamically update the mapping in cluster_state from shard", t, request.index(), request.type()); - listener.onFailure(t); - } - }); - } - - public static class MappingUpdatedResponse extends ActionResponse { - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - } - } - - public static class MappingUpdatedRequest extends MasterNodeOperationRequest<MappingUpdatedRequest> implements IndicesRequest { - - private String index; - private String indexUUID = IndexMetaData.INDEX_UUID_NA_VALUE; - private String type; - private CompressedString mappingSource; - private String nodeId = null; // null means not set - - MappingUpdatedRequest() { - } - - public MappingUpdatedRequest(String index, String indexUUID, String type, CompressedString mappingSource, String nodeId) { - this.index = index; - this.indexUUID = indexUUID; - this.type = type; - this.mappingSource = mappingSource; - this.nodeId = nodeId; - } - - public String index() { - return index; - } - - @Override - public IndicesOptions indicesOptions() { - return IndicesOptions.strictSingleIndexNoExpandForbidClosed(); - } - - @Override - public String[] indices() { - return new String[]{index}; - } - - public String indexUUID() { - return indexUUID; - } - - public String type() { - return type; - } - - public CompressedString mappingSource() { - return mappingSource; - } - - /** - * Returns null for not set. 
- */ - public String nodeId() { - return this.nodeId; - } - - @Override - public ActionRequestValidationException validate() { - return null; - } - - @Override - public void readFrom(StreamInput in) throws IOException { - super.readFrom(in); - index = in.readString(); - type = in.readString(); - mappingSource = CompressedString.readCompressedString(in); - indexUUID = in.readString(); - nodeId = in.readOptionalString(); - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - out.writeString(index); - out.writeString(type); - mappingSource.writeTo(out); - out.writeString(indexUUID); - out.writeOptionalString(nodeId); - } - - @Override - public String toString() { - return "index [" + index + "], indexUUID [" + indexUUID + "], type [" + type + "] and source [" + mappingSource + "]"; - } - } - - private static class MappingChange { - public final String index; - public final String indexUUID; - public final String type; - public final CompressedString mappingSource; - public final MappingUpdateListener listener; - - MappingChange(String index, String indexUUID, String type, CompressedString mappingSource, MappingUpdateListener listener) { - this.index = index; - this.indexUUID = indexUUID; - this.type = type; - this.mappingSource = mappingSource; - this.listener = listener; + public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate, TimeValue timeout) throws Throwable { + if (updateMappingRequest(index, type, mappingUpdate, timeout).get().isAcknowledged() == false) { + throw new TimeoutException("Failed to acknowledge mapping update within [" + timeout + "]"); } } @@ -319,90 +132,4 @@ public class MappingUpdatedAction extends TransportMasterNodeOperationAction<MappingUpdatedAction.MappingUpdatedRequest, MappingUpdatedAction.MappingUpdatedResponse> - * It also allows to reduce multiple mapping updates on the same index(UUID) and type into one update - * (refreshSource + sending to master), which allows to offload the number of times mappings are updated - * and sent to master for heavy single index requests that each introduce a new mapping, and when - * multiple shards exists on the same nodes, allowing to work on the index level in this case. - */ - private class MasterMappingUpdater extends Thread { - - private volatile boolean running = true; - private final BlockingQueue<MappingChange> queue = ConcurrentCollections.newBlockingQueue(); - - public MasterMappingUpdater(String name) { - super(name); - } - - public void add(MappingChange change) { - queue.add(change); - } - - public void close() { - running = false; - this.interrupt(); - } - - @Override - public void run() { - while (running) { - MappingUpdateListener listener = null; - try { - final MappingChange change = queue.poll(10, TimeUnit.MINUTES); - if (change == null) { - continue; - } - listener = change.listener; - - final MappingUpdatedAction.MappingUpdatedRequest mappingRequest; - try { - DiscoveryNode node = clusterService.localNode(); - mappingRequest = new MappingUpdatedAction.MappingUpdatedRequest( - change.index, change.indexUUID, change.type, change.mappingSource, node != null ? 
node.id() : null - ); - } catch (Throwable t) { - logger.warn("Failed to update master on updated mapping for index [" + change.index + "], type [" + change.type + "]", t); - if (change.listener != null) { - change.listener.onFailure(t); - } - continue; - } - logger.trace("sending mapping updated to master: {}", mappingRequest); - execute(mappingRequest, new ActionListener() { - @Override - public void onResponse(MappingUpdatedAction.MappingUpdatedResponse mappingUpdatedResponse) { - logger.debug("successfully updated master with mapping update: {}", mappingRequest); - if (change.listener != null) { - change.listener.onMappingUpdate(); - } - } - - @Override - public void onFailure(Throwable e) { - logger.warn("failed to update master on updated mapping for {}", e, mappingRequest); - if (change.listener != null) { - change.listener.onFailure(e); - } - } - }); - } catch (Throwable t) { - if (listener != null) { - // even if the failure is expected, eg. if we got interrupted, - // we need to notify the listener as there might be a latch - // waiting for it to be called - listener.onFailure(t); - } - if (t instanceof InterruptedException && !running) { - // all is well, we are shutting down - } else { - logger.warn("failed to process mapping update", t); - } - } - } - } - } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java b/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java index 32203545009..1bf3d3b6b90 100644 --- a/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java +++ b/src/main/java/org/elasticsearch/cluster/action/index/NodeIndexDeletedAction.java @@ -55,12 +55,12 @@ public class NodeIndexDeletedAction extends AbstractComponent { private final IndicesService indicesService; @Inject - public NodeIndexDeletedAction(Settings settings, ThreadPool threadPool, TransportService transportService, NodeEnvironment nodeEnv, IndicesService indicesService) { + public NodeIndexDeletedAction(Settings settings, ThreadPool threadPool, TransportService transportService, IndicesService indicesService) { super(settings); this.threadPool = threadPool; this.transportService = transportService; - transportService.registerHandler(INDEX_DELETED_ACTION_NAME, new NodeIndexDeletedTransportHandler()); - transportService.registerHandler(INDEX_STORE_DELETED_ACTION_NAME, new NodeIndexStoreDeletedTransportHandler()); + transportService.registerRequestHandler(INDEX_DELETED_ACTION_NAME, NodeIndexDeletedMessage.class, ThreadPool.Names.SAME, new NodeIndexDeletedTransportHandler()); + transportService.registerRequestHandler(INDEX_STORE_DELETED_ACTION_NAME, NodeIndexStoreDeletedMessage.class, ThreadPool.Names.SAME, new NodeIndexStoreDeletedTransportHandler()); this.indicesService = indicesService; } @@ -72,7 +72,7 @@ public class NodeIndexDeletedAction extends AbstractComponent { listeners.remove(listener); } - public void nodeIndexDeleted(final ClusterState clusterState, final String index, final Settings indexSettings, final String nodeId) throws ElasticsearchException { + public void nodeIndexDeleted(final ClusterState clusterState, final String index, final Settings indexSettings, final String nodeId) { final DiscoveryNodes nodes = clusterState.nodes(); transportService.sendRequest(clusterState.nodes().masterNode(), INDEX_DELETED_ACTION_NAME, new NodeIndexDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME); @@ -113,12 +113,7 @@ public class NodeIndexDeletedAction 
extends AbstractComponent { void onNodeIndexStoreDeleted(String index, String nodeId); } - private class NodeIndexDeletedTransportHandler extends BaseTransportRequestHandler { - - @Override - public NodeIndexDeletedMessage newInstance() { - return new NodeIndexDeletedMessage(); - } + private class NodeIndexDeletedTransportHandler implements TransportRequestHandler { @Override public void messageReceived(NodeIndexDeletedMessage message, TransportChannel channel) throws Exception { @@ -127,19 +122,9 @@ public class NodeIndexDeletedAction extends AbstractComponent { } channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } - private class NodeIndexStoreDeletedTransportHandler extends BaseTransportRequestHandler { - - @Override - public NodeIndexStoreDeletedMessage newInstance() { - return new NodeIndexStoreDeletedMessage(); - } + private class NodeIndexStoreDeletedTransportHandler implements TransportRequestHandler { @Override public void messageReceived(NodeIndexStoreDeletedMessage message, TransportChannel channel) throws Exception { @@ -148,11 +133,6 @@ public class NodeIndexDeletedAction extends AbstractComponent { } channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } static class NodeIndexDeletedMessage extends TransportRequest { diff --git a/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java b/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java index c14452a0fef..5623dd96d6f 100644 --- a/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java +++ b/src/main/java/org/elasticsearch/cluster/action/index/NodeMappingRefreshAction.java @@ -52,10 +52,10 @@ public class NodeMappingRefreshAction extends AbstractComponent { super(settings); this.transportService = transportService; this.metaDataMappingService = metaDataMappingService; - transportService.registerHandler(ACTION_NAME, new NodeMappingRefreshTransportHandler()); + transportService.registerRequestHandler(ACTION_NAME, NodeMappingRefreshRequest.class, ThreadPool.Names.SAME, new NodeMappingRefreshTransportHandler()); } - public void nodeMappingRefresh(final ClusterState state, final NodeMappingRefreshRequest request) throws ElasticsearchException { + public void nodeMappingRefresh(final ClusterState state, final NodeMappingRefreshRequest request) { final DiscoveryNodes nodes = state.nodes(); if (nodes.masterNode() == null) { logger.warn("can't send mapping refresh for [{}][{}], no master known.", request.index(), Strings.arrayToCommaDelimitedString(request.types())); @@ -64,23 +64,13 @@ public class NodeMappingRefreshAction extends AbstractComponent { transportService.sendRequest(nodes.masterNode(), ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME); } - private class NodeMappingRefreshTransportHandler extends BaseTransportRequestHandler { - - @Override - public NodeMappingRefreshRequest newInstance() { - return new NodeMappingRefreshRequest(); - } + private class NodeMappingRefreshTransportHandler implements TransportRequestHandler { @Override public void messageReceived(NodeMappingRefreshRequest request, TransportChannel channel) throws Exception { metaDataMappingService.refreshMapping(request.index(), request.indexUUID(), request.types()); channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return 
ThreadPool.Names.SAME; - } } public static class NodeMappingRefreshRequest extends TransportRequest implements IndicesRequest { diff --git a/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java b/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java index feb72fc7078..ecbc27bd266 100644 --- a/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java +++ b/src/main/java/org/elasticsearch/cluster/action/shard/ShardStateAction.java @@ -72,11 +72,11 @@ public class ShardStateAction extends AbstractComponent { this.allocationService = allocationService; this.routingService = routingService; - transportService.registerHandler(SHARD_STARTED_ACTION_NAME, new ShardStartedTransportHandler()); - transportService.registerHandler(SHARD_FAILED_ACTION_NAME, new ShardFailedTransportHandler()); + transportService.registerRequestHandler(SHARD_STARTED_ACTION_NAME, ShardRoutingEntry.class, ThreadPool.Names.SAME, new ShardStartedTransportHandler()); + transportService.registerRequestHandler(SHARD_FAILED_ACTION_NAME, ShardRoutingEntry.class, ThreadPool.Names.SAME, new ShardFailedTransportHandler()); } - public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String reason) throws ElasticsearchException { + public void shardFailed(final ShardRouting shardRouting, final String indexUUID, final String reason) { DiscoveryNode masterNode = clusterService.state().nodes().masterNode(); if (masterNode == null) { logger.warn("can't send shard failed for {}, no master known.", shardRouting); @@ -85,7 +85,7 @@ public class ShardStateAction extends AbstractComponent { innerShardFailed(shardRouting, indexUUID, reason, masterNode); } - public void resendShardFailed(final ShardRouting shardRouting, final String indexUUID, final String reason, final DiscoveryNode masterNode) throws ElasticsearchException { + public void resendShardFailed(final ShardRouting shardRouting, final String indexUUID, final String reason, final DiscoveryNode masterNode) { logger.trace("{} re-sending failed shard for {}, indexUUID [{}], reason [{}]", shardRouting.shardId(), shardRouting, indexUUID, reason); innerShardFailed(shardRouting, indexUUID, reason, masterNode); } @@ -101,7 +101,7 @@ public class ShardStateAction extends AbstractComponent { }); } - public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) throws ElasticsearchException { + public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason) { DiscoveryNode masterNode = clusterService.state().nodes().masterNode(); if (masterNode == null) { logger.warn("can't send shard started for {}. 
no master known.", shardRouting); @@ -110,7 +110,7 @@ public class ShardStateAction extends AbstractComponent { shardStarted(shardRouting, indexUUID, reason, masterNode); } - public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason, final DiscoveryNode masterNode) throws ElasticsearchException { + public void shardStarted(final ShardRouting shardRouting, String indexUUID, final String reason, final DiscoveryNode masterNode) { ShardRoutingEntry shardRoutingEntry = new ShardRoutingEntry(shardRouting, indexUUID, reason); @@ -287,42 +287,22 @@ public class ShardStateAction extends AbstractComponent { }); } - private class ShardFailedTransportHandler extends BaseTransportRequestHandler { - - @Override - public ShardRoutingEntry newInstance() { - return new ShardRoutingEntry(); - } + private class ShardFailedTransportHandler implements TransportRequestHandler { @Override public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { handleShardFailureOnMaster(request); channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } - class ShardStartedTransportHandler extends BaseTransportRequestHandler { - - @Override - public ShardRoutingEntry newInstance() { - return new ShardRoutingEntry(); - } + class ShardStartedTransportHandler implements TransportRequestHandler { @Override public void messageReceived(ShardRoutingEntry request, TransportChannel channel) throws Exception { shardStartedOnMaster(request); channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } static class ShardRoutingEntry extends TransportRequest { @@ -335,7 +315,7 @@ public class ShardStateAction extends AbstractComponent { volatile boolean processed; // state field, no need to serialize - private ShardRoutingEntry() { + ShardRoutingEntry() { } private ShardRoutingEntry(ShardRouting shardRouting, String indexUUID, String reason) { diff --git a/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java b/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java index 2c88da8986d..45ff1d3707b 100644 --- a/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java +++ b/src/main/java/org/elasticsearch/cluster/block/ClusterBlockLevel.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.block; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import java.util.EnumSet; @@ -29,9 +28,10 @@ import java.util.EnumSet; public enum ClusterBlockLevel { READ(0), WRITE(1), - METADATA(2); + METADATA_READ(2), + METADATA_WRITE(3); - public static final EnumSet ALL = EnumSet.of(READ, WRITE, METADATA); + public static final EnumSet ALL = EnumSet.of(READ, WRITE, METADATA_READ, METADATA_WRITE); public static final EnumSet READ_WRITE = EnumSet.of(READ, WRITE); private final int id; @@ -44,14 +44,16 @@ public enum ClusterBlockLevel { return this.id; } - public static ClusterBlockLevel fromId(int id) { + static ClusterBlockLevel fromId(int id) { if (id == 0) { return READ; } else if (id == 1) { return WRITE; } else if (id == 2) { - return METADATA; + return METADATA_READ; + } else if (id == 3) { + return METADATA_WRITE; } - throw new ElasticsearchIllegalArgumentException("No cluster block level matching [" + id + "]"); + throw new IllegalArgumentException("No cluster block level matching [" + id + "]"); } } diff --git 
a/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java index bb7d332de4f..95c0ba7127e 100644 --- a/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java +++ b/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java @@ -23,6 +23,7 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; import com.google.common.collect.Sets; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.common.io.stream.StreamInput; @@ -36,10 +37,12 @@ import java.util.Set; /** * Represents current cluster level blocks to block dirty operations done against the cluster. */ -public class ClusterBlocks { +public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> { public static final ClusterBlocks EMPTY_CLUSTER_BLOCK = new ClusterBlocks(ImmutableSet.<ClusterBlock>of(), ImmutableMap.<String, ImmutableSet<ClusterBlock>>of()); + public static final ClusterBlocks PROTO = EMPTY_CLUSTER_BLOCK; + private final ImmutableSet<ClusterBlock> global; private final ImmutableMap<String, ImmutableSet<ClusterBlock>> indicesBlocks; @@ -203,6 +206,43 @@ public class ClusterBlocks { return new ClusterBlockException(builder.build()); } + @Override + public void writeTo(StreamOutput out) throws IOException { + writeBlockSet(global, out); + out.writeVInt(indicesBlocks.size()); + for (Map.Entry<String, ImmutableSet<ClusterBlock>> entry : indicesBlocks.entrySet()) { + out.writeString(entry.getKey()); + writeBlockSet(entry.getValue(), out); + } + } + + private static void writeBlockSet(ImmutableSet<ClusterBlock> blocks, StreamOutput out) throws IOException { + out.writeVInt(blocks.size()); + for (ClusterBlock block : blocks) { + block.writeTo(out); + } + } + + @Override + public ClusterBlocks readFrom(StreamInput in) throws IOException { + ImmutableSet<ClusterBlock> global = readBlockSet(in); + ImmutableMap.Builder<String, ImmutableSet<ClusterBlock>> indicesBuilder = ImmutableMap.builder(); + int size = in.readVInt(); + for (int j = 0; j < size; j++) { + indicesBuilder.put(in.readString().intern(), readBlockSet(in)); + } + return new ClusterBlocks(global, indicesBuilder.build()); + } + + private static ImmutableSet<ClusterBlock> readBlockSet(StreamInput in) throws IOException { + ImmutableSet.Builder<ClusterBlock> builder = ImmutableSet.builder(); + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + builder.add(ClusterBlock.readClusterBlock(in)); + } + return builder.build(); + } + static class ImmutableLevelHolder { static final ImmutableLevelHolder EMPTY = new ImmutableLevelHolder(ImmutableSet.<ClusterBlock>of(), ImmutableMap.<String, ImmutableSet<ClusterBlock>>of()); @@ -313,38 +353,7 @@ public class ClusterBlocks { } public static ClusterBlocks readClusterBlocks(StreamInput in) throws IOException { - ImmutableSet<ClusterBlock> global = readBlockSet(in); - ImmutableMap.Builder<String, ImmutableSet<ClusterBlock>> indicesBuilder = ImmutableMap.builder(); - int size = in.readVInt(); - for (int j = 0; j < size; j++) { - indicesBuilder.put(in.readString().intern(), readBlockSet(in)); - } - return new ClusterBlocks(global, indicesBuilder.build()); - } - - public static void writeClusterBlocks(ClusterBlocks blocks, StreamOutput out) throws IOException { - writeBlockSet(blocks.global(), out); - out.writeVInt(blocks.indices().size()); - for (Map.Entry<String, ImmutableSet<ClusterBlock>> entry : blocks.indices().entrySet()) { - out.writeString(entry.getKey()); - writeBlockSet(entry.getValue(), out); - } - - private static void writeBlockSet(ImmutableSet<ClusterBlock> blocks, StreamOutput out) throws IOException { - out.writeVInt(blocks.size()); - for (ClusterBlock block : blocks) { -
block.writeTo(out); - } - } - - private static ImmutableSet readBlockSet(StreamInput in) throws IOException { - ImmutableSet.Builder builder = ImmutableSet.builder(); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - builder.add(ClusterBlock.readClusterBlock(in)); - } - return builder.build(); + return PROTO.readFrom(in); } } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java b/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java index 29bcba360bb..878082bf318 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/AliasAction.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -59,7 +58,7 @@ public class AliasAction implements Streamable { } else if (value == 1) { return REMOVE; } else { - throw new ElasticsearchIllegalArgumentException("No type for action [" + value + "]"); + throw new IllegalArgumentException("No type for action [" + value + "]"); } } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java index 008935ec026..0f7e55c8087 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/AliasMetaData.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableSet; import org.elasticsearch.ElasticsearchGenerationException; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedString; import org.elasticsearch.common.io.stream.StreamInput; @@ -38,7 +39,9 @@ import java.util.Set; /** * */ -public class AliasMetaData { +public class AliasMetaData extends AbstractDiffable { + + public static final AliasMetaData PROTO = new AliasMetaData("", null, null, null); private final String alias; @@ -146,6 +149,48 @@ public class AliasMetaData { return result; } + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(alias()); + if (filter() != null) { + out.writeBoolean(true); + filter.writeTo(out); + } else { + out.writeBoolean(false); + } + if (indexRouting() != null) { + out.writeBoolean(true); + out.writeString(indexRouting()); + } else { + out.writeBoolean(false); + } + if (searchRouting() != null) { + out.writeBoolean(true); + out.writeString(searchRouting()); + } else { + out.writeBoolean(false); + } + + } + + @Override + public AliasMetaData readFrom(StreamInput in) throws IOException { + String alias = in.readString(); + CompressedString filter = null; + if (in.readBoolean()) { + filter = CompressedString.readCompressedString(in); + } + String indexRouting = null; + if (in.readBoolean()) { + indexRouting = in.readString(); + } + String searchRouting = null; + if (in.readBoolean()) { + searchRouting = in.readString(); + } + return new AliasMetaData(alias, filter, indexRouting, searchRouting); + } + public static class Builder { private final String alias; @@ -294,44 +339,12 @@ public class AliasMetaData { return builder.build(); } - public static void writeTo(AliasMetaData aliasMetaData, StreamOutput out) throws 
IOException { - out.writeString(aliasMetaData.alias()); - if (aliasMetaData.filter() != null) { - out.writeBoolean(true); - aliasMetaData.filter.writeTo(out); - } else { - out.writeBoolean(false); - } - if (aliasMetaData.indexRouting() != null) { - out.writeBoolean(true); - out.writeString(aliasMetaData.indexRouting()); - } else { - out.writeBoolean(false); - } - if (aliasMetaData.searchRouting() != null) { - out.writeBoolean(true); - out.writeString(aliasMetaData.searchRouting()); - } else { - out.writeBoolean(false); - } - + public void writeTo(AliasMetaData aliasMetaData, StreamOutput out) throws IOException { + aliasMetaData.writeTo(out); } public static AliasMetaData readFrom(StreamInput in) throws IOException { - String alias = in.readString(); - CompressedString filter = null; - if (in.readBoolean()) { - filter = CompressedString.readCompressedString(in); - } - String indexRouting = null; - if (in.readBoolean()) { - indexRouting = in.readString(); - } - String searchRouting = null; - if (in.readBoolean()) { - searchRouting = in.readString(); - } - return new AliasMetaData(alias, filter, indexRouting, searchRouting); + return PROTO.readFrom(in); } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java b/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java index 72f7a599488..e774f0d82ce 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/AliasValidator.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; @@ -48,7 +47,7 @@ public class AliasValidator extends AbstractComponent { /** * Allows to validate an {@link org.elasticsearch.cluster.metadata.AliasAction} and make sure * it's valid before it gets added to the index metadata. Doesn't validate the alias filter. - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if the alias is not valid + * @throws IllegalArgumentException if the alias is not valid */ public void validateAliasAction(AliasAction aliasAction, MetaData metaData) { validateAlias(aliasAction.alias(), aliasAction.index(), aliasAction.indexRouting(), metaData); @@ -57,7 +56,7 @@ public class AliasValidator extends AbstractComponent { /** * Allows to validate an {@link org.elasticsearch.action.admin.indices.alias.Alias} and make sure * it's valid before it gets added to the index metadata. Doesn't validate the alias filter. - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if the alias is not valid + * @throws IllegalArgumentException if the alias is not valid */ public void validateAlias(Alias alias, String index, MetaData metaData) { validateAlias(alias.name(), index, alias.indexRouting(), metaData); @@ -66,7 +65,7 @@ public class AliasValidator extends AbstractComponent { /** * Allows to validate an {@link org.elasticsearch.cluster.metadata.AliasMetaData} and make sure * it's valid before it gets added to the index metadata. Doesn't validate the alias filter. 
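+ * For example (illustrative sketch; assumes {@code metaData} contains the target index):
+ * {@code validateAliasMetaData(AliasMetaData.builder("logs").indexRouting("1,2").build(), "logs-2015", metaData)}
+ * is rejected, because an alias carrying several index routing values cannot be used for indexing.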
- * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if the alias is not valid + * @throws IllegalArgumentException if the alias is not valid */ public void validateAliasMetaData(AliasMetaData aliasMetaData, String index, MetaData metaData) { validateAlias(aliasMetaData.alias(), index, aliasMetaData.indexRouting(), metaData); @@ -77,7 +76,7 @@ public class AliasValidator extends AbstractComponent { * Useful with index templates containing aliases. Checks also that it is possible to parse * the alias filter via {@link org.elasticsearch.common.xcontent.XContentParser}, * without validating it as a filter though. - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if the alias is not valid + * @throws IllegalArgumentException if the alias is not valid */ public void validateAliasStandalone(Alias alias) { validateAliasStandalone(alias.name(), alias.indexRouting()); @@ -86,7 +85,7 @@ public class AliasValidator extends AbstractComponent { XContentParser parser = XContentFactory.xContent(alias.filter()).createParser(alias.filter()); parser.mapAndClose(); } catch (Throwable e) { - throw new ElasticsearchIllegalArgumentException("failed to parse filter for alias [" + alias.name() + "]", e); + throw new IllegalArgumentException("failed to parse filter for alias [" + alias.name() + "]", e); } } } @@ -95,7 +94,7 @@ public class AliasValidator extends AbstractComponent { validateAliasStandalone(alias, indexRouting); if (!Strings.hasText(index)) { - throw new ElasticsearchIllegalArgumentException("index name is required"); + throw new IllegalArgumentException("index name is required"); } assert metaData != null; @@ -106,17 +105,17 @@ public class AliasValidator extends AbstractComponent { private void validateAliasStandalone(String alias, String indexRouting) { if (!Strings.hasText(alias)) { - throw new ElasticsearchIllegalArgumentException("alias name is required"); + throw new IllegalArgumentException("alias name is required"); } if (indexRouting != null && indexRouting.indexOf(',') != -1) { - throw new ElasticsearchIllegalArgumentException("alias [" + alias + "] has several index routing values associated with it"); + throw new IllegalArgumentException("alias [" + alias + "] has several index routing values associated with it"); } } /** * Validates an alias filter by parsing it using the * provided {@link org.elasticsearch.index.query.IndexQueryParserService} - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if the filter is not valid + * @throws IllegalArgumentException if the filter is not valid */ public void validateAliasFilter(String alias, String filter, IndexQueryParserService indexQueryParserService) { assert indexQueryParserService != null; @@ -124,14 +123,14 @@ public class AliasValidator extends AbstractComponent { XContentParser parser = XContentFactory.xContent(filter).createParser(filter); validateAliasFilter(parser, indexQueryParserService); } catch (Throwable e) { - throw new ElasticsearchIllegalArgumentException("failed to parse filter for alias [" + alias + "]", e); + throw new IllegalArgumentException("failed to parse filter for alias [" + alias + "]", e); } } /** * Validates an alias filter by parsing it using the * provided {@link org.elasticsearch.index.query.IndexQueryParserService} - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if the filter is not valid + * @throws IllegalArgumentException if the filter is not valid */ public void validateAliasFilter(String alias, byte[] filter, IndexQueryParserService 
indexQueryParserService) { assert indexQueryParserService != null; @@ -139,7 +138,7 @@ public class AliasValidator extends AbstractComponent { XContentParser parser = XContentFactory.xContent(filter).createParser(filter); validateAliasFilter(parser, indexQueryParserService); } catch (Throwable e) { - throw new ElasticsearchIllegalArgumentException("failed to parse filter for alias [" + alias + "]", e); + throw new IllegalArgumentException("failed to parse filter for alias [" + alias + "]", e); } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index d84250ebcd7..fe76d0f3f2b 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -21,17 +21,18 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.Diffable; +import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.cluster.node.DiscoveryNodeFilters; import org.elasticsearch.cluster.routing.HashFunction; import org.elasticsearch.cluster.routing.Murmur3HashFunction; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.compress.CompressedString; @@ -61,66 +62,60 @@ import static org.elasticsearch.common.settings.ImmutableSettings.*; /** * */ -public class IndexMetaData { +public class IndexMetaData implements Diffable { + public static final IndexMetaData PROTO = IndexMetaData.builder("") + .settings(ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(1).numberOfReplicas(0).build(); - public interface Custom { + public interface Custom extends Diffable, ToXContent { String type(); - interface Factory { + Custom fromMap(Map map) throws IOException; - String type(); + Custom fromXContent(XContentParser parser) throws IOException; - T readFrom(StreamInput in) throws IOException; - - void writeTo(T customIndexMetaData, StreamOutput out) throws IOException; - - T fromMap(Map map) throws IOException; - - T fromXContent(XContentParser parser) throws IOException; - - void toXContent(T customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException; - - /** - * Merges from first to second, with first being more important, i.e., if something exists in first and second, - * first will prevail. - */ - T merge(T first, T second); - } + /** + * Merges from this to another, with this being more important, i.e., if something exists in this and another, + * this will prevail. 
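+ * For example (illustrative sketch): for a map-backed custom, {@code first.mergeWith(second)}
+ * keeps every entry of {@code first} and adds only those entries of {@code second} whose keys
+ * are absent from {@code first}.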
+ */ + Custom mergeWith(Custom another); } - public static Map customFactories = new HashMap<>(); + public static Map customPrototypes = new HashMap<>(); static { // register non plugin custom metadata - registerFactory(IndexWarmersMetaData.TYPE, IndexWarmersMetaData.FACTORY); + registerPrototype(IndexWarmersMetaData.TYPE, IndexWarmersMetaData.PROTO); } /** * Register a custom index meta data factory. Make sure to call it from a static block. */ - public static void registerFactory(String type, Custom.Factory factory) { - customFactories.put(type, factory); + public static void registerPrototype(String type, Custom proto) { + customPrototypes.put(type, proto); } @Nullable - public static Custom.Factory lookupFactory(String type) { - return customFactories.get(type); + public static T lookupPrototype(String type) { + //noinspection unchecked + return (T) customPrototypes.get(type); } - public static Custom.Factory lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException { - Custom.Factory factory = customFactories.get(type); - if (factory == null) { - throw new ElasticsearchIllegalArgumentException("No custom index metadata factoy registered for type [" + type + "]"); + public static T lookupPrototypeSafe(String type) { + //noinspection unchecked + T proto = (T) customPrototypes.get(type); + if (proto == null) { + throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "]"); } - return factory; + return proto; } - public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA)); + public static final ClusterBlock INDEX_READ_ONLY_BLOCK = new ClusterBlock(5, "index read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); public static final ClusterBlock INDEX_READ_BLOCK = new ClusterBlock(7, "index read (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.READ)); public static final ClusterBlock INDEX_WRITE_BLOCK = new ClusterBlock(8, "index write (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE)); - public static final ClusterBlock INDEX_METADATA_BLOCK = new ClusterBlock(9, "index metadata (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA)); + public static final ClusterBlock INDEX_METADATA_BLOCK = new ClusterBlock(9, "index metadata (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.METADATA_WRITE, ClusterBlockLevel.METADATA_READ)); public static enum State { OPEN((byte) 0), @@ -142,7 +137,7 @@ public class IndexMetaData { } else if (id == 1) { return CLOSE; } - throw new ElasticsearchIllegalStateException("No state match for id [" + id + "]"); + throw new IllegalStateException("No state match for id [" + id + "]"); } public static State fromString(String state) { @@ -151,7 +146,7 @@ public class IndexMetaData { } else if ("close".equals(state)) { return CLOSE; } - throw new ElasticsearchIllegalStateException("No state match for [" + state + "]"); + throw new IllegalStateException("No state match for [" + state + "]"); } } public static final String INDEX_SETTING_PREFIX = "index."; @@ -237,7 +232,7 @@ public class IndexMetaData { try { routingHashFunction = hashFunctionClass.newInstance(); } catch (InstantiationException | IllegalAccessException e) { - throw new ElasticsearchIllegalStateException("Cannot instantiate hash 
function", e); + throw new IllegalStateException("Cannot instantiate hash function", e); } } useTypeForRouting = settings.getAsBoolean(SETTING_LEGACY_ROUTING_USE_TYPE, false); @@ -453,7 +448,9 @@ public class IndexMetaData { if (state != that.state) { return false; } - + if (!customs.equals(that.customs)) { + return false; + } return true; } @@ -467,6 +464,126 @@ public class IndexMetaData { return result; } + @Override + public Diff diff(IndexMetaData previousState) { + return new IndexMetaDataDiff(previousState, this); + } + + @Override + public Diff readDiffFrom(StreamInput in) throws IOException { + return new IndexMetaDataDiff(in); + } + + private static class IndexMetaDataDiff implements Diff { + + private final String index; + private final long version; + private final State state; + private final Settings settings; + private final Diff> mappings; + private final Diff> aliases; + private Diff> customs; + + public IndexMetaDataDiff(IndexMetaData before, IndexMetaData after) { + index = after.index; + version = after.version; + state = after.state; + settings = after.settings; + mappings = DiffableUtils.diff(before.mappings, after.mappings); + aliases = DiffableUtils.diff(before.aliases, after.aliases); + customs = DiffableUtils.diff(before.customs, after.customs); + } + + public IndexMetaDataDiff(StreamInput in) throws IOException { + index = in.readString(); + version = in.readLong(); + state = State.fromId(in.readByte()); + settings = ImmutableSettings.readSettingsFromStream(in); + mappings = DiffableUtils.readImmutableOpenMapDiff(in, MappingMetaData.PROTO); + aliases = DiffableUtils.readImmutableOpenMapDiff(in, AliasMetaData.PROTO); + customs = DiffableUtils.readImmutableOpenMapDiff(in, new DiffableUtils.KeyedReader() { + @Override + public Custom readFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readFrom(in); + } + + @Override + public Diff readDiffFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readDiffFrom(in); + } + }); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(index); + out.writeLong(version); + out.writeByte(state.id); + ImmutableSettings.writeSettingsToStream(settings, out); + mappings.writeTo(out); + aliases.writeTo(out); + customs.writeTo(out); + } + + @Override + public IndexMetaData apply(IndexMetaData part) { + Builder builder = builder(index); + builder.version(version); + builder.state(state); + builder.settings(settings); + builder.mappings.putAll(mappings.apply(part.mappings)); + builder.aliases.putAll(aliases.apply(part.aliases)); + builder.customs.putAll(customs.apply(part.customs)); + return builder.build(); + } + } + + @Override + public IndexMetaData readFrom(StreamInput in) throws IOException { + Builder builder = new Builder(in.readString()); + builder.version(in.readLong()); + builder.state(State.fromId(in.readByte())); + builder.settings(readSettingsFromStream(in)); + int mappingsSize = in.readVInt(); + for (int i = 0; i < mappingsSize; i++) { + MappingMetaData mappingMd = MappingMetaData.PROTO.readFrom(in); + builder.putMapping(mappingMd); + } + int aliasesSize = in.readVInt(); + for (int i = 0; i < aliasesSize; i++) { + AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in); + builder.putAlias(aliasMd); + } + int customSize = in.readVInt(); + for (int i = 0; i < customSize; i++) { + String type = in.readString(); + Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in); + builder.putCustom(type, 
customIndexMetaData); + } + return builder.build(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(index); + out.writeLong(version); + out.writeByte(state.id()); + writeSettingsToStream(settings, out); + out.writeVInt(mappings.size()); + for (ObjectCursor cursor : mappings.values()) { + cursor.value.writeTo(out); + } + out.writeVInt(aliases.size()); + for (ObjectCursor cursor : aliases.values()) { + cursor.value.writeTo(out); + } + out.writeVInt(customs.size()); + for (ObjectObjectCursor cursor : customs) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + } + public static Builder builder(String index) { return new Builder(index); } @@ -662,7 +779,7 @@ public class IndexMetaData { for (ObjectObjectCursor cursor : indexMetaData.customs()) { builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE); - lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params); + cursor.value.toXContent(builder, params); builder.endObject(); } @@ -709,12 +826,13 @@ public class IndexMetaData { } } else { // check if its a custom index metadata - Custom.Factory factory = lookupFactory(currentFieldName); - if (factory == null) { + Custom proto = lookupPrototype(currentFieldName); + if (proto == null) { //TODO warn parser.skipChildren(); } else { - builder.putCustom(factory.type(), factory.fromXContent(parser)); + Custom custom = proto.fromXContent(parser); + builder.putCustom(custom.type(), custom); } } } else if (token == XContentParser.Token.START_ARRAY) { @@ -743,47 +861,7 @@ public class IndexMetaData { } public static IndexMetaData readFrom(StreamInput in) throws IOException { - Builder builder = new Builder(in.readString()); - builder.version(in.readLong()); - builder.state(State.fromId(in.readByte())); - builder.settings(readSettingsFromStream(in)); - int mappingsSize = in.readVInt(); - for (int i = 0; i < mappingsSize; i++) { - MappingMetaData mappingMd = MappingMetaData.readFrom(in); - builder.putMapping(mappingMd); - } - int aliasesSize = in.readVInt(); - for (int i = 0; i < aliasesSize; i++) { - AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in); - builder.putAlias(aliasMd); - } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); - } - return builder.build(); - } - - public static void writeTo(IndexMetaData indexMetaData, StreamOutput out) throws IOException { - out.writeString(indexMetaData.index()); - out.writeLong(indexMetaData.version()); - out.writeByte(indexMetaData.state().id()); - writeSettingsToStream(indexMetaData.settings(), out); - out.writeVInt(indexMetaData.mappings().size()); - for (ObjectCursor cursor : indexMetaData.mappings().values()) { - MappingMetaData.writeTo(cursor.value, out); - } - out.writeVInt(indexMetaData.aliases().size()); - for (ObjectCursor cursor : indexMetaData.aliases().values()) { - AliasMetaData.Builder.writeTo(cursor.value, out); - } - out.writeVInt(indexMetaData.customs().size()); - for (ObjectObjectCursor cursor : indexMetaData.customs()) { - out.writeString(cursor.key); - lookupFactorySafe(cursor.key).writeTo(cursor.value, out); - } + return PROTO.readFrom(in); } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java index 582e008550d..54150ee6a1e 100644 --- 
a/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetaData.java @@ -21,7 +21,7 @@ package org.elasticsearch.cluster.metadata; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.Sets; -import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.compress.CompressedString; @@ -42,7 +42,9 @@ import java.util.Set; /** * */ -public class IndexTemplateMetaData { +public class IndexTemplateMetaData extends AbstractDiffable { + + public static final IndexTemplateMetaData PROTO = IndexTemplateMetaData.builder("").build(); private final String name; @@ -161,11 +163,57 @@ public class IndexTemplateMetaData { return result; } + @Override + public IndexTemplateMetaData readFrom(StreamInput in) throws IOException { + Builder builder = new Builder(in.readString()); + builder.order(in.readInt()); + builder.template(in.readString()); + builder.settings(ImmutableSettings.readSettingsFromStream(in)); + int mappingsSize = in.readVInt(); + for (int i = 0; i < mappingsSize; i++) { + builder.putMapping(in.readString(), CompressedString.readCompressedString(in)); + } + int aliasesSize = in.readVInt(); + for (int i = 0; i < aliasesSize; i++) { + AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in); + builder.putAlias(aliasMd); + } + int customSize = in.readVInt(); + for (int i = 0; i < customSize; i++) { + String type = in.readString(); + IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in); + builder.putCustom(type, customIndexMetaData); + } + return builder.build(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + out.writeInt(order); + out.writeString(template); + ImmutableSettings.writeSettingsToStream(settings, out); + out.writeVInt(mappings.size()); + for (ObjectObjectCursor cursor : mappings) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + out.writeVInt(aliases.size()); + for (ObjectCursor cursor : aliases.values()) { + cursor.value.writeTo(out); + } + out.writeVInt(customs.size()); + for (ObjectObjectCursor cursor : customs) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + } + public static class Builder { private static final Set VALID_FIELDS = Sets.newHashSet("template", "order", "mappings", "settings"); static { - VALID_FIELDS.addAll(IndexMetaData.customFactories.keySet()); + VALID_FIELDS.addAll(IndexMetaData.customPrototypes.keySet()); } private String name; @@ -305,7 +353,7 @@ public class IndexTemplateMetaData { for (ObjectObjectCursor cursor : indexTemplateMetaData.customs()) { builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE); - IndexMetaData.lookupFactorySafe(cursor.key).toXContent(cursor.value, builder, params); + cursor.value.toXContent(builder, params); builder.endObject(); } @@ -347,12 +395,13 @@ public class IndexTemplateMetaData { } } else { // check if its a custom index metadata - IndexMetaData.Custom.Factory factory = IndexMetaData.lookupFactory(currentFieldName); - if (factory == null) { + IndexMetaData.Custom proto = IndexMetaData.lookupPrototype(currentFieldName); + if (proto == null) { //TODO warn parser.skipChildren(); } else { - builder.putCustom(factory.type(), 
factory.fromXContent(parser)); + IndexMetaData.Custom custom = proto.fromXContent(parser); + builder.putCustom(custom.type(), custom); } } } else if (token == XContentParser.Token.START_ARRAY) { @@ -401,47 +450,7 @@ public class IndexTemplateMetaData { } public static IndexTemplateMetaData readFrom(StreamInput in) throws IOException { - Builder builder = new Builder(in.readString()); - builder.order(in.readInt()); - builder.template(in.readString()); - builder.settings(ImmutableSettings.readSettingsFromStream(in)); - int mappingsSize = in.readVInt(); - for (int i = 0; i < mappingsSize; i++) { - builder.putMapping(in.readString(), CompressedString.readCompressedString(in)); - } - int aliasesSize = in.readVInt(); - for (int i = 0; i < aliasesSize; i++) { - AliasMetaData aliasMd = AliasMetaData.Builder.readFrom(in); - builder.putAlias(aliasMd); - } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupFactorySafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); - } - return builder.build(); - } - - public static void writeTo(IndexTemplateMetaData indexTemplateMetaData, StreamOutput out) throws IOException { - out.writeString(indexTemplateMetaData.name()); - out.writeInt(indexTemplateMetaData.order()); - out.writeString(indexTemplateMetaData.template()); - ImmutableSettings.writeSettingsToStream(indexTemplateMetaData.settings(), out); - out.writeVInt(indexTemplateMetaData.mappings().size()); - for (ObjectObjectCursor cursor : indexTemplateMetaData.mappings()) { - out.writeString(cursor.key); - cursor.value.writeTo(out); - } - out.writeVInt(indexTemplateMetaData.aliases().size()); - for (ObjectCursor cursor : indexTemplateMetaData.aliases().values()) { - AliasMetaData.Builder.writeTo(cursor.value, out); - } - out.writeVInt(indexTemplateMetaData.customs().size()); - for (ObjectObjectCursor cursor : indexTemplateMetaData.customs()) { - out.writeString(cursor.key); - IndexMetaData.lookupFactorySafe(cursor.key).writeTo(cursor.value, out); - } + return PROTO.readFrom(in); } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java index f2ace98caeb..7225a43d5ef 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java @@ -19,9 +19,10 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.ElasticsearchIllegalStateException; +import com.google.common.collect.Maps; import org.elasticsearch.Version; import org.elasticsearch.action.TimestampParsingException; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedString; @@ -39,14 +40,18 @@ import org.elasticsearch.index.mapper.internal.TimestampFieldMapper; import java.io.IOException; import java.util.Arrays; +import java.util.HashMap; import java.util.Map; +import static com.google.common.collect.Maps.newHashMap; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; /** * Mapping configuration for a type. 
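+ * For example (illustrative): a mapping source of {@code {"tweet": {"properties": {...}}}}
+ * yields a MappingMetaData for type {@code "tweet"}, the type being derived from the single root key.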
*/ -public class MappingMetaData { +public class MappingMetaData extends AbstractDiffable { + + public static final MappingMetaData PROTO = new MappingMetaData(); public static class Id { @@ -297,7 +302,7 @@ public class MappingMetaData { this.source = mapping; Map mappingMap = XContentHelper.createParser(mapping.compressed(), 0, mapping.compressed().length).mapOrderedAndClose(); if (mappingMap.size() != 1) { - throw new ElasticsearchIllegalStateException("Can't derive type from mapping, no root type: " + mapping.string()); + throw new IllegalStateException("Can't derive type from mapping, no root type: " + mapping.string()); } this.type = mappingMap.keySet().iterator().next(); initMappers((Map) mappingMap.get(this.type)); @@ -318,6 +323,15 @@ public class MappingMetaData { initMappers(withoutType); } + private MappingMetaData() { + this.type = ""; + try { + this.source = new CompressedString(""); + } catch (IOException ex) { + throw new IllegalStateException("Cannot create MappingMetaData prototype", ex); + } + } + private void initMappers(Map withoutType) { if (withoutType.containsKey("_id")) { String path = null; @@ -533,34 +547,35 @@ public class MappingMetaData { } } - public static void writeTo(MappingMetaData mappingMd, StreamOutput out) throws IOException { - out.writeString(mappingMd.type()); - mappingMd.source().writeTo(out); + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(type()); + source().writeTo(out); // id - if (mappingMd.id().hasPath()) { + if (id().hasPath()) { out.writeBoolean(true); - out.writeString(mappingMd.id().path()); + out.writeString(id().path()); } else { out.writeBoolean(false); } // routing - out.writeBoolean(mappingMd.routing().required()); - if (mappingMd.routing().hasPath()) { + out.writeBoolean(routing().required()); + if (routing().hasPath()) { out.writeBoolean(true); - out.writeString(mappingMd.routing().path()); + out.writeString(routing().path()); } else { out.writeBoolean(false); } // timestamp - out.writeBoolean(mappingMd.timestamp().enabled()); - out.writeOptionalString(mappingMd.timestamp().path()); - out.writeString(mappingMd.timestamp().format()); - out.writeOptionalString(mappingMd.timestamp().defaultTimestamp()); + out.writeBoolean(timestamp().enabled()); + out.writeOptionalString(timestamp().path()); + out.writeString(timestamp().format()); + out.writeOptionalString(timestamp().defaultTimestamp()); // TODO Remove the test in elasticsearch 2.0.0 if (out.getVersion().onOrAfter(Version.V_1_5_0)) { - out.writeOptionalBoolean(mappingMd.timestamp().ignoreMissing()); + out.writeOptionalBoolean(timestamp().ignoreMissing()); } - out.writeBoolean(mappingMd.hasParentField()); + out.writeBoolean(hasParentField()); } @Override @@ -589,7 +604,7 @@ public class MappingMetaData { return result; } - public static MappingMetaData readFrom(StreamInput in) throws IOException { + public MappingMetaData readFrom(StreamInput in) throws IOException { String type = in.readString(); CompressedString source = CompressedString.readCompressedString(in); // id diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java index 2026a148b26..97a1367d8e8 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaData.java @@ -25,8 +25,9 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import 
com.google.common.base.Predicate; import com.google.common.collect.*; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.cluster.*; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.DiffableUtils.KeyedReader; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockLevel; import org.elasticsearch.common.Nullable; @@ -56,7 +57,9 @@ import static org.elasticsearch.common.settings.ImmutableSettings.*; /** * */ -public class MetaData implements Iterable { +public class MetaData implements Iterable, Diffable { + + public static final MetaData PROTO = builder().build(); public static final String ALL = "_all"; @@ -68,66 +71,57 @@ public class MetaData implements Iterable { GATEWAY, /* Custom metadata should be stored as part of a snapshot */ - SNAPSHOT; + SNAPSHOT } public static EnumSet API_ONLY = EnumSet.of(XContentContext.API); public static EnumSet API_AND_GATEWAY = EnumSet.of(XContentContext.API, XContentContext.GATEWAY); public static EnumSet API_AND_SNAPSHOT = EnumSet.of(XContentContext.API, XContentContext.SNAPSHOT); - public interface Custom { + public interface Custom extends Diffable, ToXContent { - abstract class Factory { + String type(); - public abstract String type(); + Custom fromXContent(XContentParser parser) throws IOException; - public abstract T readFrom(StreamInput in) throws IOException; - - public abstract void writeTo(T customIndexMetaData, StreamOutput out) throws IOException; - - public abstract T fromXContent(XContentParser parser) throws IOException; - - public abstract void toXContent(T customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException; - - public EnumSet context() { - return API_ONLY; - } - } + EnumSet context(); } - public static Map customFactories = new HashMap<>(); + public static Map customPrototypes = new HashMap<>(); static { // register non plugin custom metadata - registerFactory(RepositoriesMetaData.TYPE, RepositoriesMetaData.FACTORY); - registerFactory(SnapshotMetaData.TYPE, SnapshotMetaData.FACTORY); - registerFactory(RestoreMetaData.TYPE, RestoreMetaData.FACTORY); + registerPrototype(RepositoriesMetaData.TYPE, RepositoriesMetaData.PROTO); + registerPrototype(SnapshotMetaData.TYPE, SnapshotMetaData.PROTO); + registerPrototype(RestoreMetaData.TYPE, RestoreMetaData.PROTO); } /** * Register a custom index meta data factory. Make sure to call it from a static block. 
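+ * For example (illustrative sketch; {@code MyCustom} and its {@code PROTO} constant are hypothetical):
+ * <pre>
+ * static {
+ *     MetaData.registerPrototype(MyCustom.TYPE, MyCustom.PROTO);
+ * }
+ * </pre>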
*/ - public static void registerFactory(String type, Custom.Factory factory) { - customFactories.put(type, factory); + public static void registerPrototype(String type, Custom proto) { + customPrototypes.put(type, proto); } @Nullable - public static Custom.Factory lookupFactory(String type) { - return customFactories.get(type); + public static T lookupPrototype(String type) { + //noinspection unchecked + return (T) customPrototypes.get(type); } - public static Custom.Factory lookupFactorySafe(String type) throws ElasticsearchIllegalArgumentException { - Custom.Factory factory = customFactories.get(type); - if (factory == null) { - throw new ElasticsearchIllegalArgumentException("No custom index metadata factory registered for type [" + type + "]"); + public static T lookupPrototypeSafe(String type) { + //noinspection unchecked + T proto = (T) customPrototypes.get(type); + if (proto == null) { + throw new IllegalArgumentException("No custom metadata prototype registered for type [" + type + "]"); } - return factory; + return proto; } public static final String SETTING_READ_ONLY = "cluster.blocks.read_only"; - public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA)); + public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE)); public static final MetaData EMPTY_META_DATA = builder().build(); @@ -474,20 +468,20 @@ public class MetaData implements Iterable { return routing; } if (indexAliases.size() > 1) { - throw new ElasticsearchIllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one index associated with it [" + Arrays.toString(indexAliases.keys().toArray(String.class)) + "], can't execute a single index op"); + throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one index associated with it [" + Arrays.toString(indexAliases.keys().toArray(String.class)) + "], can't execute a single index op"); } AliasMetaData aliasMd = indexAliases.values().iterator().next().value; if (aliasMd.indexRouting() != null) { if (routing != null) { if (!routing.equals(aliasMd.indexRouting())) { - throw new ElasticsearchIllegalArgumentException("Alias [" + aliasOrIndex + "] has index routing associated with it [" + aliasMd.indexRouting() + "], and was provided with routing value [" + routing + "], rejecting operation"); + throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has index routing associated with it [" + aliasMd.indexRouting() + "], and was provided with routing value [" + routing + "], rejecting operation"); } } routing = aliasMd.indexRouting(); } if (routing != null) { if (routing.indexOf(',') != -1) { - throw new ElasticsearchIllegalArgumentException("index/alias [" + aliasOrIndex + "] provided with routing value [" + routing + "] that resolved to several routing values, rejecting operation"); + throw new IllegalArgumentException("index/alias [" + aliasOrIndex + "] provided with routing value [" + routing + "] that resolved to several routing values, rejecting operation"); } } return routing; @@ -645,16 +639,24 @@ public class MetaData implements Iterable { /** * Translates the provided indices or aliases, eventually containing wildcard expressions, into actual indices. 
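+ * For example (illustrative sketch): given concrete indices {@code logs-1} and {@code logs-2},
+ * {@code concreteIndices(IndicesOptions.strictExpandOpen(), "logs-*")} resolves to both of them.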
 * - * @param indicesOptions how the aliases or indices need to be resolved to concrete indices + * @param indicesOptions how the aliases or indices need to be resolved to concrete indices * @param aliasesOrIndices the aliases or indices to be resolved to concrete indices * @return the obtained concrete indices * @throws IndexMissingException if one of the aliases or indices is missing and the provided indices options * don't allow such a case, or if the final result of the indices resolution is no indices and the indices options * don't allow such a case. - * @throws ElasticsearchIllegalArgumentException if one of the aliases resolve to multiple indices and the provided + * @throws IllegalArgumentException if one of the aliases resolves to multiple indices and the provided * indices options don't allow such a case. */ - public String[] concreteIndices(IndicesOptions indicesOptions, String... aliasesOrIndices) throws IndexMissingException, ElasticsearchIllegalArgumentException { + public String[] concreteIndices(IndicesOptions indicesOptions, String... aliasesOrIndices) throws IndexMissingException, IllegalArgumentException { if (indicesOptions.expandWildcardsOpen() || indicesOptions.expandWildcardsClosed()) { if (isAllIndices(aliasesOrIndices)) { String[] concreteIndices; @@ -677,7 +679,7 @@ if (aliasesOrIndices == null || aliasesOrIndices.length == 0) { if (!indicesOptions.allowNoIndices()) { - throw new ElasticsearchIllegalArgumentException("no indices were specified and wildcard expansion is disabled."); + throw new IllegalArgumentException("no indices were specified and wildcard expansion is disabled."); } else { return Strings.EMPTY_ARRAY; } @@ -734,23 +736,23 @@ * Utility method that allows to resolve an index or alias to its corresponding single concrete index. * Callers should make sure they provide proper {@link org.elasticsearch.action.support.IndicesOptions} * that require a single index as a result. The indices resolution must in fact return a single index when - * using this method, an {@link org.elasticsearch.ElasticsearchIllegalArgumentException} gets thrown otherwise. + * using this method; otherwise an {@link IllegalArgumentException} is thrown. 
 * * @param indexOrAlias the index or alias to be resolved to concrete index * @param indicesOptions the indices options to be used for the index resolution * @return the concrete index obtained as a result of the index resolution * @throws IndexMissingException if the index or alias provided doesn't exist - * @throws ElasticsearchIllegalArgumentException if the index resolution lead to more than one index + * @throws IllegalArgumentException if the index resolution leads to more than one index */ - public String concreteSingleIndex(String indexOrAlias, IndicesOptions indicesOptions) throws IndexMissingException, ElasticsearchIllegalArgumentException { + public String concreteSingleIndex(String indexOrAlias, IndicesOptions indicesOptions) throws IndexMissingException, IllegalArgumentException { String[] indices = concreteIndices(indicesOptions, indexOrAlias); if (indices.length != 1) { - throw new ElasticsearchIllegalArgumentException("unable to return a single index as the index and options provided got resolved to multiple indices"); + throw new IllegalArgumentException("unable to return a single index as the index and options provided got resolved to multiple indices"); } return indices[0]; } - private String[] concreteIndices(String aliasOrIndex, IndicesOptions options, boolean failNoIndices) throws IndexMissingException, ElasticsearchIllegalArgumentException { + private String[] concreteIndices(String aliasOrIndex, IndicesOptions options, boolean failNoIndices) throws IndexMissingException, IllegalArgumentException { boolean failClosed = options.forbidClosedIndices() && !options.ignoreUnavailable(); // a quick check, if this is an actual index, if so, return it @@ -772,7 +774,7 @@ public class MetaData implements Iterable<IndexMetaData> { throw new IndexMissingException(new Index(aliasOrIndex)); } if (indices.length > 1 && !options.allowAliasesToMultipleIndices()) { - throw new ElasticsearchIllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one indices associated with it [" + Arrays.toString(indices) + "], can't execute a single index op"); + throw new IllegalArgumentException("Alias [" + aliasOrIndex + "] has more than one index associated with it [" + Arrays.toString(indices) + "], can't execute a single index op"); } // No need to check whether indices referred by aliases are closed, because there are no closed indices. 
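The net effect of the MetaData hunks above and below is a move from per-type Custom.Factory singletons to self-describing prototypes: each custom metadata class now carries its own readFrom/writeTo/fromXContent, extends AbstractDiffable so it can be shipped as an incremental cluster state diff, and registers a single PROTO instance that MetaData uses as the deserialization entry point. A minimal sketch of what a third-party custom metadata type could look like against this API; MyCustomMetaData and its single value field are invented for illustration, and only registerPrototype, AbstractDiffable, and the Custom interface methods come from this patch:

import java.io.IOException;
import java.util.EnumSet;

import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

// Hypothetical example type, not part of this patch.
public class MyCustomMetaData extends AbstractDiffable<MetaData.Custom> implements MetaData.Custom {

    public static final String TYPE = "my_custom";

    // The prototype doubles as the deserialization entry point; MetaData
    // looks it up by TYPE for both full states and diffs.
    public static final MyCustomMetaData PROTO = new MyCustomMetaData("");

    static {
        // Mirrors the built-in registrations above; call from a static block.
        MetaData.registerPrototype(TYPE, PROTO);
    }

    private final String value;

    public MyCustomMetaData(String value) {
        this.value = value;
    }

    @Override
    public String type() {
        return TYPE;
    }

    // Full (non-diff) wire format: symmetric readFrom/writeTo.
    @Override
    public MetaData.Custom readFrom(StreamInput in) throws IOException {
        return new MyCustomMetaData(in.readString());
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(value);
    }

    @Override
    public MetaData.Custom fromXContent(XContentParser parser) throws IOException {
        String value = "";
        XContentParser.Token token;
        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
            if (token == XContentParser.Token.VALUE_STRING) {
                value = parser.text();
            }
        }
        return new MyCustomMetaData(value);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
        return builder.field("value", value);
    }

    @Override
    public EnumSet<MetaData.XContentContext> context() {
        return MetaData.API_ONLY;
    }

    // AbstractDiffable falls back to equals() to decide whether anything
    // changed between two cluster states, so value semantics matter here.
    @Override
    public boolean equals(Object o) {
        return o instanceof MyCustomMetaData && value.equals(((MyCustomMetaData) o).value);
    }

    @Override
    public int hashCode() {
        return value.hashCode();
    }
}

With this in place, the MetaDataDiff machinery in the next hunk can round-trip any registered type with lookupPrototypeSafe(key).readFrom(in) and lookupPrototypeSafe(key).readDiffFrom(in), without a per-type factory registry.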
@@ -1140,14 +1142,14 @@ public class MetaData implements Iterable<IndexMetaData> { // Check if any persistent metadata needs to be saved int customCount1 = 0; for (ObjectObjectCursor<String, Custom> cursor : metaData1.customs) { - if (customFactories.get(cursor.key).context().contains(XContentContext.GATEWAY)) { + if (customPrototypes.get(cursor.key).context().contains(XContentContext.GATEWAY)) { if (!cursor.value.equals(metaData2.custom(cursor.key))) return false; customCount1++; } } int customCount2 = 0; for (ObjectObjectCursor<String, Custom> cursor : metaData2.customs) { - if (customFactories.get(cursor.key).context().contains(XContentContext.GATEWAY)) { + if (customPrototypes.get(cursor.key).context().contains(XContentContext.GATEWAY)) { customCount2++; } } @@ -1155,6 +1157,129 @@ return true; } + @Override + public Diff<MetaData> diff(MetaData previousState) { + return new MetaDataDiff(previousState, this); + } + + @Override + public Diff<MetaData> readDiffFrom(StreamInput in) throws IOException { + return new MetaDataDiff(in); + } + + private static class MetaDataDiff implements Diff<MetaData> { + + private long version; + + private String uuid; + + private Settings transientSettings; + private Settings persistentSettings; + private Diff<ImmutableOpenMap<String, IndexMetaData>> indices; + private Diff<ImmutableOpenMap<String, IndexTemplateMetaData>> templates; + private Diff<ImmutableOpenMap<String, Custom>> customs; + + + public MetaDataDiff(MetaData before, MetaData after) { + uuid = after.uuid; + version = after.version; + transientSettings = after.transientSettings; + persistentSettings = after.persistentSettings; + indices = DiffableUtils.diff(before.indices, after.indices); + templates = DiffableUtils.diff(before.templates, after.templates); + customs = DiffableUtils.diff(before.customs, after.customs); + } + + public MetaDataDiff(StreamInput in) throws IOException { + uuid = in.readString(); + version = in.readLong(); + transientSettings = ImmutableSettings.readSettingsFromStream(in); + persistentSettings = ImmutableSettings.readSettingsFromStream(in); + indices = DiffableUtils.readImmutableOpenMapDiff(in, IndexMetaData.PROTO); + templates = DiffableUtils.readImmutableOpenMapDiff(in, IndexTemplateMetaData.PROTO); + customs = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader<Custom>() { + @Override + public Custom readFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readFrom(in); + } + + @Override + public Diff<Custom> readDiffFrom(StreamInput in, String key) throws IOException { + return lookupPrototypeSafe(key).readDiffFrom(in); + } + }); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(uuid); + out.writeLong(version); + ImmutableSettings.writeSettingsToStream(transientSettings, out); + ImmutableSettings.writeSettingsToStream(persistentSettings, out); + indices.writeTo(out); + templates.writeTo(out); + customs.writeTo(out); + } + + @Override + public MetaData apply(MetaData part) { + Builder builder = builder(); + builder.uuid(uuid); + builder.version(version); + builder.transientSettings(transientSettings); + builder.persistentSettings(persistentSettings); + builder.indices(indices.apply(part.indices)); + builder.templates(templates.apply(part.templates)); + builder.customs(customs.apply(part.customs)); + return builder.build(); + } + } + + @Override + public MetaData readFrom(StreamInput in) throws IOException { + Builder builder = new Builder(); + builder.version = in.readLong(); + builder.uuid = in.readString(); + builder.transientSettings(readSettingsFromStream(in)); + builder.persistentSettings(readSettingsFromStream(in)); + int size = 
in.readVInt(); + for (int i = 0; i < size; i++) { + builder.put(IndexMetaData.Builder.readFrom(in), false); + } + size = in.readVInt(); + for (int i = 0; i < size; i++) { + builder.put(IndexTemplateMetaData.Builder.readFrom(in)); + } + int customSize = in.readVInt(); + for (int i = 0; i < customSize; i++) { + String type = in.readString(); + Custom customIndexMetaData = lookupPrototypeSafe(type).readFrom(in); + builder.putCustom(type, customIndexMetaData); + } + return builder.build(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeLong(version); + out.writeString(uuid); + writeSettingsToStream(transientSettings, out); + writeSettingsToStream(persistentSettings, out); + out.writeVInt(indices.size()); + for (IndexMetaData indexMetaData : this) { + indexMetaData.writeTo(out); + } + out.writeVInt(templates.size()); + for (ObjectCursor<IndexTemplateMetaData> cursor : templates.values()) { + cursor.value.writeTo(out); + } + out.writeVInt(customs.size()); + for (ObjectObjectCursor<String, Custom> cursor : customs) { + out.writeString(cursor.key); + cursor.value.writeTo(out); + } + } + public static Builder builder() { return new Builder(); } @@ -1226,6 +1351,11 @@ public class MetaData implements Iterable<IndexMetaData> { return this; } + public Builder indices(ImmutableOpenMap<String, IndexMetaData> indices) { + this.indices.putAll(indices); + return this; + } + public Builder put(IndexTemplateMetaData.Builder template) { return put(template.build()); } @@ -1240,6 +1370,11 @@ public class MetaData implements Iterable<IndexMetaData> { return this; } + public Builder templates(ImmutableOpenMap<String, IndexTemplateMetaData> templates) { + this.templates.putAll(templates); + return this; + } + public Custom getCustom(String type) { return customs.get(type); } @@ -1254,6 +1389,11 @@ public class MetaData implements Iterable<IndexMetaData> { return this; } + public Builder customs(ImmutableOpenMap<String, Custom> customs) { + this.customs.putAll(customs); + return this; + } + public Builder updateSettings(Settings settings, String... 
indices) { if (indices == null || indices.length == 0) { indices = this.indices.keys().toArray(String.class); @@ -1306,6 +1446,11 @@ public class MetaData implements Iterable { return this; } + public Builder uuid(String uuid) { + this.uuid = uuid; + return this; + } + public Builder generateUuidIfNeeded() { if (uuid.equals("_na_")) { uuid = Strings.randomBase64UUID(); @@ -1364,10 +1509,10 @@ public class MetaData implements Iterable { } for (ObjectObjectCursor cursor : metaData.customs()) { - Custom.Factory factory = lookupFactorySafe(cursor.key); - if (factory.context().contains(context)) { + Custom proto = lookupPrototypeSafe(cursor.key); + if (proto.context().contains(context)) { builder.startObject(cursor.key); - factory.toXContent(cursor.value, builder, params); + cursor.value.toXContent(builder, params); builder.endObject(); } } @@ -1411,12 +1556,13 @@ public class MetaData implements Iterable { } } else { // check if its a custom index metadata - Custom.Factory factory = lookupFactory(currentFieldName); - if (factory == null) { + Custom proto = lookupPrototype(currentFieldName); + if (proto == null) { //TODO warn parser.skipChildren(); } else { - builder.putCustom(factory.type(), factory.fromXContent(parser)); + Custom custom = proto.fromXContent(parser); + builder.putCustom(custom.type(), custom); } } } else if (token.isValue()) { @@ -1431,46 +1577,7 @@ public class MetaData implements Iterable { } public static MetaData readFrom(StreamInput in) throws IOException { - Builder builder = new Builder(); - builder.version = in.readLong(); - builder.uuid = in.readString(); - builder.transientSettings(readSettingsFromStream(in)); - builder.persistentSettings(readSettingsFromStream(in)); - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - builder.put(IndexMetaData.Builder.readFrom(in), false); - } - size = in.readVInt(); - for (int i = 0; i < size; i++) { - builder.put(IndexTemplateMetaData.Builder.readFrom(in)); - } - int customSize = in.readVInt(); - for (int i = 0; i < customSize; i++) { - String type = in.readString(); - Custom customIndexMetaData = lookupFactorySafe(type).readFrom(in); - builder.putCustom(type, customIndexMetaData); - } - return builder.build(); - } - - public static void writeTo(MetaData metaData, StreamOutput out) throws IOException { - out.writeLong(metaData.version); - out.writeString(metaData.uuid); - writeSettingsToStream(metaData.transientSettings(), out); - writeSettingsToStream(metaData.persistentSettings(), out); - out.writeVInt(metaData.indices.size()); - for (IndexMetaData indexMetaData : metaData) { - IndexMetaData.Builder.writeTo(indexMetaData, out); - } - out.writeVInt(metaData.templates.size()); - for (ObjectCursor cursor : metaData.templates.values()) { - IndexTemplateMetaData.Builder.writeTo(cursor.value, out); - } - out.writeVInt(metaData.customs().size()); - for (ObjectObjectCursor cursor : metaData.customs()) { - out.writeString(cursor.key); - lookupFactorySafe(cursor.key).writeTo(cursor.value, out); - } + return PROTO.readFrom(in); } } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 09864fce908..732561f66f1 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -26,7 +26,6 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import 
org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; @@ -156,7 +155,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { }); } - public void validateIndexName(String index, ClusterState state) throws ElasticsearchException { + public void validateIndexName(String index, ClusterState state) { if (state.routingTable().hasIndex(index)) { throw new IndexAlreadyExistsException(new Index(index)); } @@ -273,7 +272,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { if (existing == null) { customs.put(type, custom); } else { - IndexMetaData.Custom merged = IndexMetaData.lookupFactorySafe(type).merge(existing, custom); + IndexMetaData.Custom merged = existing.mergeWith(custom); customs.put(type, merged); } } @@ -338,8 +337,7 @@ public class MetaDataCreateIndexService extends AbstractComponent { if (request.index().equals(ScriptService.SCRIPT_INDEX)) { indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 0)); indexSettingsBuilder.put(SETTING_AUTO_EXPAND_REPLICAS, "0-all"); - } - else { + } else { if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) { if (request.index().equals(riverIndexName)) { indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1)); @@ -552,13 +550,39 @@ public class MetaDataCreateIndexService extends AbstractComponent { return templates; } - private void validate(CreateIndexClusterStateUpdateRequest request, ClusterState state) throws ElasticsearchException { + private void validate(CreateIndexClusterStateUpdateRequest request, ClusterState state) { validateIndexName(request.index(), state); - String customPath = request.settings().get(IndexMetaData.SETTING_DATA_PATH, null); + validateIndexSettings(request.index(), request.settings()); + } + + public void validateIndexSettings(String indexName, Settings settings) throws IndexCreationException { + String customPath = settings.get(IndexMetaData.SETTING_DATA_PATH, null); + List validationErrors = Lists.newArrayList(); if (customPath != null && nodeEnv.isCustomPathsEnabled() == false) { - throw new IndexCreationException(new Index(request.index()), - new ElasticsearchIllegalArgumentException("custom data_paths for indices is disabled")); + validationErrors.add("custom data_paths for indices is disabled"); } + Integer number_of_primaries = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null); + Integer number_of_replicas = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null); + if (number_of_primaries != null && number_of_primaries <= 0) { + validationErrors.add("index must have 1 or more primary shards"); + } + if (number_of_replicas != null && number_of_replicas < 0) { + validationErrors.add("index must have 0 or more replica shards"); + } + if (validationErrors.isEmpty() == false) { + throw new IndexCreationException(new Index(indexName), + new IllegalArgumentException(getMessage(validationErrors))); + } + } + + private String getMessage(List validationErrors) { + StringBuilder sb = new StringBuilder(); + sb.append("Validation Failed: "); + int index = 0; + for (String error : validationErrors) { + sb.append(++index).append(": ").append(error).append(";"); + } + return sb.toString(); } private static class DefaultIndexTemplateFilter 
implements IndexTemplateFilter { diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java index 5c556b86246..625c8d6e839 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexStateService.java @@ -19,7 +19,6 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest; import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest; @@ -68,7 +67,7 @@ public class MetaDataIndexStateService extends AbstractComponent { public void closeIndex(final CloseIndexClusterStateUpdateRequest request, final ActionListener listener) { if (request.indices() == null || request.indices().length == 0) { - throw new ElasticsearchIllegalArgumentException("Index name is required"); + throw new IllegalArgumentException("Index name is required"); } final String indicesAsString = Arrays.toString(request.indices()); @@ -128,7 +127,7 @@ public class MetaDataIndexStateService extends AbstractComponent { public void openIndex(final OpenIndexClusterStateUpdateRequest request, final ActionListener listener) { if (request.indices() == null || request.indices().length == 0) { - throw new ElasticsearchIllegalArgumentException("Index name is required"); + throw new IllegalArgumentException("Index name is required"); } final String indicesAsString = Arrays.toString(request.indices()); diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java index 95c806f7d49..755d0077c13 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataIndexTemplateService.java @@ -23,7 +23,6 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.support.master.MasterNodeOperationRequest; import org.elasticsearch.cluster.ClusterService; @@ -111,11 +110,11 @@ public class MetaDataIndexTemplateService extends AbstractComponent { request.settings(updatedSettingsBuilder.build()); if (request.name == null) { - listener.onFailure(new ElasticsearchIllegalArgumentException("index_template must provide a name")); + listener.onFailure(new IllegalArgumentException("index_template must provide a name")); return; } if (request.template == null) { - listener.onFailure(new ElasticsearchIllegalArgumentException("index_template must provide a template")); + listener.onFailure(new IllegalArgumentException("index_template must provide a template")); return; } @@ -178,7 +177,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent { }); } - private void validate(PutRequest request) throws ElasticsearchException { + private void validate(PutRequest request) { if (request.name.contains(" ")) { throw new InvalidIndexTemplateException(request.name, "name must not contain a space"); } diff --git 
a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java index 690dcceb534..5bacae63d0c 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataMappingService.java @@ -40,6 +40,7 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.InvalidTypeNameException; @@ -48,8 +49,6 @@ import org.elasticsearch.percolator.PercolatorService; import java.util.*; import static com.google.common.collect.Maps.newHashMap; -import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags; - /** * Service responsible for submitting mapping changes */ @@ -331,47 +330,6 @@ public class MetaDataMappingService extends AbstractComponent { }); } - public void updateMapping(final String index, final String indexUUID, final String type, final CompressedString mappingSource, final String nodeId, final ActionListener listener) { - final long insertOrder; - synchronized (refreshOrUpdateMutex) { - insertOrder = ++refreshOrUpdateInsertOrder; - refreshOrUpdateQueue.add(new UpdateTask(index, indexUUID, type, mappingSource, nodeId, listener)); - } - clusterService.submitStateUpdateTask("update-mapping [" + index + "][" + type + "] / node [" + nodeId + "]", Priority.HIGH, new ProcessedClusterStateUpdateTask() { - private volatile List allTasks; - - @Override - public void onFailure(String source, Throwable t) { - listener.onFailure(t); - } - - @Override - public ClusterState execute(final ClusterState currentState) throws Exception { - Tuple> tuple = executeRefreshOrUpdate(currentState, insertOrder); - this.allTasks = tuple.v2(); - return tuple.v1(); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - if (allTasks == null) { - return; - } - for (Object task : allTasks) { - if (task instanceof UpdateTask) { - UpdateTask uTask = (UpdateTask) task; - ClusterStateUpdateResponse response = new ClusterStateUpdateResponse(true); - try { - uTask.listener.onResponse(response); - } catch (Throwable t) { - logger.debug("failed to ping back on response of mapping processing for task [{}]", t, uTask.listener); - } - } - } - } - }); - } - public void putMapping(final PutMappingClusterStateUpdateRequest request, final ActionListener listener) { clusterService.submitStateUpdateTask("put-mapping [" + request.type() + "]", Priority.HIGH, new AckedClusterStateUpdateTask(request, listener) { @@ -423,10 +381,10 @@ public class MetaDataMappingService extends AbstractComponent { newMapper = indexService.mapperService().parse(request.type(), new CompressedString(request.source()), existingMapper == null); if (existingMapper != null) { // first, simulate - DocumentMapper.MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), mergeFlags().simulate(true)); + MergeResult mergeResult = existingMapper.merge(newMapper.mapping(), true); // if we have conflicts, and we are not supposed to ignore them, throw an exception if (!request.ignoreConflicts() && mergeResult.hasConflicts()) { - throw new 
MergeMappingException(mergeResult.conflicts()); + throw new MergeMappingException(mergeResult.buildConflicts()); } } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java index bc7539a7ff1..8b4e334bade 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/MetaDataUpdateSettingsService.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.Sets; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -172,7 +171,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements // never allow to change the number of shards for (String key : updatedSettingsBuilder.internalMap().keySet()) { if (key.equals(IndexMetaData.SETTING_NUMBER_OF_SHARDS)) { - listener.onFailure(new ElasticsearchIllegalArgumentException("can't change the number of shards for an index")); + listener.onFailure(new IllegalArgumentException("can't change the number of shards for an index")); return; } } @@ -193,7 +192,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements } if (!errors.isEmpty()) { - listener.onFailure(new ElasticsearchIllegalArgumentException("can't process the settings: " + errors.toString())); + listener.onFailure(new IllegalArgumentException("can't process the settings: " + errors.toString())); return; } @@ -230,7 +229,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements } if (!removedSettings.isEmpty() && !openIndices.isEmpty()) { - throw new ElasticsearchIllegalArgumentException(String.format(Locale.ROOT, + throw new IllegalArgumentException(String.format(Locale.ROOT, "Can't update non dynamic settings[%s] for open indices[%s]", removedSettings, openIndices diff --git a/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java index 81b11fc14b1..51cd5db086b 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetaData.java @@ -21,6 +21,8 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableList; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.cluster.metadata.MetaData.Custom; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ImmutableSettings; @@ -39,11 +41,11 @@ import java.util.Map; /** * Contains metadata about registered snapshot repositories */ -public class RepositoriesMetaData implements MetaData.Custom { +public class RepositoriesMetaData extends AbstractDiffable implements MetaData.Custom { public static final String TYPE = "repositories"; - public static final Factory FACTORY = new Factory(); + public static final RepositoriesMetaData PROTO = new RepositoriesMetaData(); private final ImmutableList repositories; @@ -80,122 +82,132 @@ public class RepositoriesMetaData implements MetaData.Custom { return null; } + @Override + 
public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RepositoriesMetaData that = (RepositoriesMetaData) o; + + return repositories.equals(that.repositories); + + } + + @Override + public int hashCode() { + return repositories.hashCode(); + } + /** - * Repository metadata factory + * {@inheritDoc} */ - public static class Factory extends MetaData.Custom.Factory { + @Override + public String type() { + return TYPE; + } - /** - * {@inheritDoc} - */ - @Override - public String type() { - return TYPE; + /** + * {@inheritDoc} + */ + @Override + public Custom readFrom(StreamInput in) throws IOException { + RepositoryMetaData[] repository = new RepositoryMetaData[in.readVInt()]; + for (int i = 0; i < repository.length; i++) { + repository[i] = RepositoryMetaData.readFrom(in); } + return new RepositoriesMetaData(repository); + } - /** - * {@inheritDoc} - */ - @Override - public RepositoriesMetaData readFrom(StreamInput in) throws IOException { - RepositoryMetaData[] repository = new RepositoryMetaData[in.readVInt()]; - for (int i = 0; i < repository.length; i++) { - repository[i] = RepositoryMetaData.readFrom(in); - } - return new RepositoriesMetaData(repository); - } - - /** - * {@inheritDoc} - */ - @Override - public void writeTo(RepositoriesMetaData repositories, StreamOutput out) throws IOException { - out.writeVInt(repositories.repositories().size()); - for (RepositoryMetaData repository : repositories.repositories()) { - repository.writeTo(out); - } - } - - /** - * {@inheritDoc} - */ - @Override - public RepositoriesMetaData fromXContent(XContentParser parser) throws IOException { - XContentParser.Token token; - List repository = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String name = parser.currentName(); - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], expected object"); - } - String type = null; - Settings settings = ImmutableSettings.EMPTY; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String currentFieldName = parser.currentName(); - if ("type".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown type"); - } - type = parser.text(); - } else if ("settings".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], incompatible params"); - } - settings = ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build(); - } else { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown field [" + currentFieldName + "]"); - } - } else { - throw new ElasticsearchParseException("failed to parse repository [" + name + "]"); - } - } - if (type == null) { - throw new ElasticsearchParseException("failed to parse repository [" + name + "], missing repository type"); - } - repository.add(new RepositoryMetaData(name, type, settings)); - } else { - throw new ElasticsearchParseException("failed to parse repositories"); - } - } - return new RepositoriesMetaData(repository.toArray(new 
RepositoryMetaData[repository.size()])); - } - - /** - * {@inheritDoc} - */ - @Override - public void toXContent(RepositoriesMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { - for (RepositoryMetaData repository : customIndexMetaData.repositories()) { - toXContent(repository, builder, params); - } - } - - @Override - public EnumSet context() { - return MetaData.API_AND_GATEWAY; - } - - /** - * Serializes information about a single repository - * - * @param repository repository metadata - * @param builder XContent builder - * @param params serialization parameters - * @throws IOException - */ - public void toXContent(RepositoryMetaData repository, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(repository.name(), XContentBuilder.FieldCaseConversion.NONE); - builder.field("type", repository.type()); - builder.startObject("settings"); - for (Map.Entry settingEntry : repository.settings().getAsMap().entrySet()) { - builder.field(settingEntry.getKey(), settingEntry.getValue()); - } - builder.endObject(); - - builder.endObject(); + /** + * {@inheritDoc} + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(repositories.size()); + for (RepositoryMetaData repository : repositories) { + repository.writeTo(out); } } + /** + * {@inheritDoc} + */ + @Override + public RepositoriesMetaData fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token; + List repository = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String name = parser.currentName(); + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], expected object"); + } + String type = null; + Settings settings = ImmutableSettings.EMPTY; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + if ("type".equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown type"); + } + type = parser.text(); + } else if ("settings".equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.START_OBJECT) { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], incompatible params"); + } + settings = ImmutableSettings.settingsBuilder().put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).build(); + } else { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], unknown field [" + currentFieldName + "]"); + } + } else { + throw new ElasticsearchParseException("failed to parse repository [" + name + "]"); + } + } + if (type == null) { + throw new ElasticsearchParseException("failed to parse repository [" + name + "], missing repository type"); + } + repository.add(new RepositoryMetaData(name, type, settings)); + } else { + throw new ElasticsearchParseException("failed to parse repositories"); + } + } + return new RepositoriesMetaData(repository.toArray(new RepositoryMetaData[repository.size()])); + } + + /** + * {@inheritDoc} + */ + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + for (RepositoryMetaData 
repository : repositories) { + toXContent(repository, builder, params); + } + return builder; + } + + @Override + public EnumSet context() { + return MetaData.API_AND_GATEWAY; + } + + /** + * Serializes information about a single repository + * + * @param repository repository metadata + * @param builder XContent builder + * @param params serialization parameters + * @throws IOException + */ + public static void toXContent(RepositoryMetaData repository, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(repository.name(), XContentBuilder.FieldCaseConversion.NONE); + builder.field("type", repository.type()); + builder.startObject("settings"); + for (Map.Entry settingEntry : repository.settings().getAsMap().entrySet()) { + builder.field(settingEntry.getKey(), settingEntry.getValue()); + } + builder.endObject(); + + builder.endObject(); + } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java index ea50b30ba88..a283f1f43c1 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/RepositoryMetaData.java @@ -99,4 +99,25 @@ public class RepositoryMetaData { out.writeString(type); ImmutableSettings.writeSettingsToStream(settings, out); } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RepositoryMetaData that = (RepositoryMetaData) o; + + if (!name.equals(that.name)) return false; + if (!type.equals(that.type)) return false; + return settings.equals(that.settings); + + } + + @Override + public int hashCode() { + int result = name.hashCode(); + result = 31 * result + type.hashCode(); + result = 31 * result + settings.hashCode(); + return result; + } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java index 373d5ff858c..51fd5e0514a 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/RestoreMetaData.java @@ -21,7 +21,7 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; @@ -30,16 +30,17 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.util.EnumSet; import java.util.Map; /** * Meta data about restore processes that are currently executing */ -public class RestoreMetaData implements MetaData.Custom { +public class RestoreMetaData extends AbstractDiffable implements MetaData.Custom { public static final String TYPE = "restore"; - public static final Factory FACTORY = new Factory(); + public static final RestoreMetaData PROTO = new RestoreMetaData(); private final ImmutableList entries; @@ -389,130 +390,128 @@ public class RestoreMetaData implements MetaData.Custom { case 3: return FAILURE; default: - throw new ElasticsearchIllegalArgumentException("No snapshot state for value [" + value + "]"); + 
throw new IllegalArgumentException("No snapshot state for value [" + value + "]"); } } } /** - * Restore metadata factory + * {@inheritDoc} */ - public static class Factory extends MetaData.Custom.Factory { + @Override + public String type() { + return TYPE; + } - /** - * {@inheritDoc} - */ - @Override - public String type() { - return TYPE; - } - - /** - * {@inheritDoc} - */ - @Override - public RestoreMetaData readFrom(StreamInput in) throws IOException { - Entry[] entries = new Entry[in.readVInt()]; - for (int i = 0; i < entries.length; i++) { - SnapshotId snapshotId = SnapshotId.readSnapshotId(in); - State state = State.fromValue(in.readByte()); - int indices = in.readVInt(); - ImmutableList.Builder indexBuilder = ImmutableList.builder(); - for (int j = 0; j < indices; j++) { - indexBuilder.add(in.readString()); - } - ImmutableMap.Builder builder = ImmutableMap.builder(); - int shards = in.readVInt(); - for (int j = 0; j < shards; j++) { - ShardId shardId = ShardId.readShardId(in); - ShardRestoreStatus shardState = ShardRestoreStatus.readShardRestoreStatus(in); - builder.put(shardId, shardState); - } - entries[i] = new Entry(snapshotId, state, indexBuilder.build(), builder.build()); + /** + * {@inheritDoc} + */ + @Override + public RestoreMetaData readFrom(StreamInput in) throws IOException { + Entry[] entries = new Entry[in.readVInt()]; + for (int i = 0; i < entries.length; i++) { + SnapshotId snapshotId = SnapshotId.readSnapshotId(in); + State state = State.fromValue(in.readByte()); + int indices = in.readVInt(); + ImmutableList.Builder indexBuilder = ImmutableList.builder(); + for (int j = 0; j < indices; j++) { + indexBuilder.add(in.readString()); } - return new RestoreMetaData(entries); - } - - /** - * {@inheritDoc} - */ - @Override - public void writeTo(RestoreMetaData repositories, StreamOutput out) throws IOException { - out.writeVInt(repositories.entries().size()); - for (Entry entry : repositories.entries()) { - entry.snapshotId().writeTo(out); - out.writeByte(entry.state().value()); - out.writeVInt(entry.indices().size()); - for (String index : entry.indices()) { - out.writeString(index); - } - out.writeVInt(entry.shards().size()); - for (Map.Entry shardEntry : entry.shards().entrySet()) { - shardEntry.getKey().writeTo(out); - shardEntry.getValue().writeTo(out); - } + ImmutableMap.Builder builder = ImmutableMap.builder(); + int shards = in.readVInt(); + for (int j = 0; j < shards; j++) { + ShardId shardId = ShardId.readShardId(in); + ShardRestoreStatus shardState = ShardRestoreStatus.readShardRestoreStatus(in); + builder.put(shardId, shardState); } + entries[i] = new Entry(snapshotId, state, indexBuilder.build(), builder.build()); } + return new RestoreMetaData(entries); + } - /** - * {@inheritDoc} - */ - @Override - public RestoreMetaData fromXContent(XContentParser parser) throws IOException { - throw new UnsupportedOperationException(); - } - - /** - * {@inheritDoc} - */ - @Override - public void toXContent(RestoreMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startArray("snapshots"); - for (Entry entry : customIndexMetaData.entries()) { - toXContent(entry, builder, params); + /** + * {@inheritDoc} + */ + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(entries.size()); + for (Entry entry : entries) { + entry.snapshotId().writeTo(out); + out.writeByte(entry.state().value()); + out.writeVInt(entry.indices().size()); + for (String index : entry.indices()) { + 
out.writeString(index); } - builder.endArray(); - } - - /** - * Serializes single restore operation - * - * @param entry restore operation metadata - * @param builder XContent builder - * @param params serialization parameters - * @throws IOException - */ - public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(); - builder.field("snapshot", entry.snapshotId().getSnapshot()); - builder.field("repository", entry.snapshotId().getRepository()); - builder.field("state", entry.state()); - builder.startArray("indices"); - { - for (String index : entry.indices()) { - builder.value(index); - } + out.writeVInt(entry.shards().size()); + for (Map.Entry shardEntry : entry.shards().entrySet()) { + shardEntry.getKey().writeTo(out); + shardEntry.getValue().writeTo(out); } - builder.endArray(); - builder.startArray("shards"); - { - for (Map.Entry shardEntry : entry.shards.entrySet()) { - ShardId shardId = shardEntry.getKey(); - ShardRestoreStatus status = shardEntry.getValue(); - builder.startObject(); - { - builder.field("index", shardId.getIndex()); - builder.field("shard", shardId.getId()); - builder.field("state", status.state()); - } - builder.endObject(); - } - } - - builder.endArray(); - builder.endObject(); } } + /** + * {@inheritDoc} + */ + @Override + public RestoreMetaData fromXContent(XContentParser parser) throws IOException { + throw new UnsupportedOperationException(); + } + @Override + public EnumSet context() { + return MetaData.API_ONLY; + } + + /** + * {@inheritDoc} + */ + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startArray("snapshots"); + for (Entry entry : entries) { + toXContent(entry, builder, params); + } + builder.endArray(); + return builder; + } + + /** + * Serializes single restore operation + * + * @param entry restore operation metadata + * @param builder XContent builder + * @param params serialization parameters + * @throws IOException + */ + public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field("snapshot", entry.snapshotId().getSnapshot()); + builder.field("repository", entry.snapshotId().getRepository()); + builder.field("state", entry.state()); + builder.startArray("indices"); + { + for (String index : entry.indices()) { + builder.value(index); + } + } + builder.endArray(); + builder.startArray("shards"); + { + for (Map.Entry shardEntry : entry.shards.entrySet()) { + ShardId shardId = shardEntry.getKey(); + ShardRestoreStatus status = shardEntry.getValue(); + builder.startObject(); + { + builder.field("index", shardId.getIndex()); + builder.field("shard", shardId.getId()); + builder.field("state", status.state()); + } + builder.endObject(); + } + } + + builder.endArray(); + builder.endObject(); + } } diff --git a/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java index b759fe5daeb..b23c58710a0 100644 --- a/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java +++ b/src/main/java/org/elasticsearch/cluster/metadata/SnapshotMetaData.java @@ -21,7 +21,8 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.cluster.AbstractDiffable; +import 
org.elasticsearch.cluster.metadata.MetaData.Custom; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; @@ -31,6 +32,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.shard.ShardId; import java.io.IOException; +import java.util.EnumSet; import java.util.Map; import static com.google.common.collect.Maps.newHashMap; @@ -38,10 +40,10 @@ import static com.google.common.collect.Maps.newHashMap; /** * Meta data about snapshots that are currently executing */ -public class SnapshotMetaData implements MetaData.Custom { +public class SnapshotMetaData extends AbstractDiffable implements MetaData.Custom { public static final String TYPE = "snapshots"; - public static final Factory FACTORY = new Factory(); + public static final SnapshotMetaData PROTO = new SnapshotMetaData(); @Override public boolean equals(Object o) { @@ -301,7 +303,7 @@ public class SnapshotMetaData implements MetaData.Custom { case 6: return WAITING; default: - throw new ElasticsearchIllegalArgumentException("No snapshot state for value [" + value + "]"); + throw new IllegalArgumentException("No snapshot state for value [" + value + "]"); } } } @@ -330,123 +332,123 @@ public class SnapshotMetaData implements MetaData.Custom { return null; } + @Override + public String type() { + return TYPE; + } - public static class Factory extends MetaData.Custom.Factory { - - @Override - public String type() { - return TYPE; //To change body of implemented methods use File | Settings | File Templates. - } - - @Override - public SnapshotMetaData readFrom(StreamInput in) throws IOException { - Entry[] entries = new Entry[in.readVInt()]; - for (int i = 0; i < entries.length; i++) { - SnapshotId snapshotId = SnapshotId.readSnapshotId(in); - boolean includeGlobalState = in.readBoolean(); - State state = State.fromValue(in.readByte()); - int indices = in.readVInt(); - ImmutableList.Builder indexBuilder = ImmutableList.builder(); - for (int j = 0; j < indices; j++) { - indexBuilder.add(in.readString()); - } - long startTime = in.readLong(); - ImmutableMap.Builder builder = ImmutableMap.builder(); - int shards = in.readVInt(); - for (int j = 0; j < shards; j++) { - ShardId shardId = ShardId.readShardId(in); - String nodeId = in.readOptionalString(); - State shardState = State.fromValue(in.readByte()); - builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState)); - } - entries[i] = new Entry(snapshotId, includeGlobalState, state, indexBuilder.build(), startTime, builder.build()); + @Override + public SnapshotMetaData readFrom(StreamInput in) throws IOException { + Entry[] entries = new Entry[in.readVInt()]; + for (int i = 0; i < entries.length; i++) { + SnapshotId snapshotId = SnapshotId.readSnapshotId(in); + boolean includeGlobalState = in.readBoolean(); + State state = State.fromValue(in.readByte()); + int indices = in.readVInt(); + ImmutableList.Builder indexBuilder = ImmutableList.builder(); + for (int j = 0; j < indices; j++) { + indexBuilder.add(in.readString()); } - return new SnapshotMetaData(entries); - } - - @Override - public void writeTo(SnapshotMetaData repositories, StreamOutput out) throws IOException { - out.writeVInt(repositories.entries().size()); - for (Entry entry : repositories.entries()) { - entry.snapshotId().writeTo(out); - out.writeBoolean(entry.includeGlobalState()); - out.writeByte(entry.state().value()); - out.writeVInt(entry.indices().size()); - for (String index : 
entry.indices()) { - out.writeString(index); - } - out.writeLong(entry.startTime()); - out.writeVInt(entry.shards().size()); - for (Map.Entry shardEntry : entry.shards().entrySet()) { - shardEntry.getKey().writeTo(out); - out.writeOptionalString(shardEntry.getValue().nodeId()); - out.writeByte(shardEntry.getValue().state().value()); - } + long startTime = in.readLong(); + ImmutableMap.Builder builder = ImmutableMap.builder(); + int shards = in.readVInt(); + for (int j = 0; j < shards; j++) { + ShardId shardId = ShardId.readShardId(in); + String nodeId = in.readOptionalString(); + State shardState = State.fromValue(in.readByte()); + builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState)); } + entries[i] = new Entry(snapshotId, includeGlobalState, state, indexBuilder.build(), startTime, builder.build()); } + return new SnapshotMetaData(entries); + } - @Override - public SnapshotMetaData fromXContent(XContentParser parser) throws IOException { - throw new UnsupportedOperationException(); - } - - static final class Fields { - static final XContentBuilderString REPOSITORY = new XContentBuilderString("repository"); - static final XContentBuilderString SNAPSHOTS = new XContentBuilderString("snapshots"); - static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot"); - static final XContentBuilderString INCLUDE_GLOBAL_STATE = new XContentBuilderString("include_global_state"); - static final XContentBuilderString STATE = new XContentBuilderString("state"); - static final XContentBuilderString INDICES = new XContentBuilderString("indices"); - static final XContentBuilderString START_TIME_MILLIS = new XContentBuilderString("start_time_millis"); - static final XContentBuilderString START_TIME = new XContentBuilderString("start_time"); - static final XContentBuilderString SHARDS = new XContentBuilderString("shards"); - static final XContentBuilderString INDEX = new XContentBuilderString("index"); - static final XContentBuilderString SHARD = new XContentBuilderString("shard"); - static final XContentBuilderString NODE = new XContentBuilderString("node"); - } - - @Override - public void toXContent(SnapshotMetaData customIndexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startArray(Fields.SNAPSHOTS); - for (Entry entry : customIndexMetaData.entries()) { - toXContent(entry, builder, params); + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(entries.size()); + for (Entry entry : entries) { + entry.snapshotId().writeTo(out); + out.writeBoolean(entry.includeGlobalState()); + out.writeByte(entry.state().value()); + out.writeVInt(entry.indices().size()); + for (String index : entry.indices()) { + out.writeString(index); } - builder.endArray(); - } - - public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.startObject(); - builder.field(Fields.REPOSITORY, entry.snapshotId().getRepository()); - builder.field(Fields.SNAPSHOT, entry.snapshotId().getSnapshot()); - builder.field(Fields.INCLUDE_GLOBAL_STATE, entry.includeGlobalState()); - builder.field(Fields.STATE, entry.state()); - builder.startArray(Fields.INDICES); - { - for (String index : entry.indices()) { - builder.value(index); - } + out.writeLong(entry.startTime()); + out.writeVInt(entry.shards().size()); + for (Map.Entry shardEntry : entry.shards().entrySet()) { + shardEntry.getKey().writeTo(out); + out.writeOptionalString(shardEntry.getValue().nodeId()); + 
out.writeByte(shardEntry.getValue().state().value()); } - builder.endArray(); - builder.timeValueField(Fields.START_TIME_MILLIS, Fields.START_TIME, entry.startTime()); - builder.startArray(Fields.SHARDS); - { - for (Map.Entry shardEntry : entry.shards.entrySet()) { - ShardId shardId = shardEntry.getKey(); - ShardSnapshotStatus status = shardEntry.getValue(); - builder.startObject(); - { - builder.field(Fields.INDEX, shardId.getIndex()); - builder.field(Fields.SHARD, shardId.getId()); - builder.field(Fields.STATE, status.state()); - builder.field(Fields.NODE, status.nodeId()); - } - builder.endObject(); - } - } - builder.endArray(); - builder.endObject(); } } + @Override + public SnapshotMetaData fromXContent(XContentParser parser) throws IOException { + throw new UnsupportedOperationException(); + } + @Override + public EnumSet context() { + return MetaData.API_ONLY; + } + + static final class Fields { + static final XContentBuilderString REPOSITORY = new XContentBuilderString("repository"); + static final XContentBuilderString SNAPSHOTS = new XContentBuilderString("snapshots"); + static final XContentBuilderString SNAPSHOT = new XContentBuilderString("snapshot"); + static final XContentBuilderString INCLUDE_GLOBAL_STATE = new XContentBuilderString("include_global_state"); + static final XContentBuilderString STATE = new XContentBuilderString("state"); + static final XContentBuilderString INDICES = new XContentBuilderString("indices"); + static final XContentBuilderString START_TIME_MILLIS = new XContentBuilderString("start_time_millis"); + static final XContentBuilderString START_TIME = new XContentBuilderString("start_time"); + static final XContentBuilderString SHARDS = new XContentBuilderString("shards"); + static final XContentBuilderString INDEX = new XContentBuilderString("index"); + static final XContentBuilderString SHARD = new XContentBuilderString("shard"); + static final XContentBuilderString NODE = new XContentBuilderString("node"); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startArray(Fields.SNAPSHOTS); + for (Entry entry : entries) { + toXContent(entry, builder, params); + } + builder.endArray(); + return builder; + } + + public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.startObject(); + builder.field(Fields.REPOSITORY, entry.snapshotId().getRepository()); + builder.field(Fields.SNAPSHOT, entry.snapshotId().getSnapshot()); + builder.field(Fields.INCLUDE_GLOBAL_STATE, entry.includeGlobalState()); + builder.field(Fields.STATE, entry.state()); + builder.startArray(Fields.INDICES); + { + for (String index : entry.indices()) { + builder.value(index); + } + } + builder.endArray(); + builder.timeValueField(Fields.START_TIME_MILLIS, Fields.START_TIME, entry.startTime()); + builder.startArray(Fields.SHARDS); + { + for (Map.Entry shardEntry : entry.shards.entrySet()) { + ShardId shardId = shardEntry.getKey(); + ShardSnapshotStatus status = shardEntry.getValue(); + builder.startObject(); + { + builder.field(Fields.INDEX, shardId.getIndex()); + builder.field(Fields.SHARD, shardId.getId()); + builder.field(Fields.STATE, status.state()); + builder.field(Fields.NODE, status.nodeId()); + } + builder.endObject(); + } + } + builder.endArray(); + builder.endObject(); + } } diff --git a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index 
13b0a62f0e6..915f0d5599d 100644 --- a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.node; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; @@ -59,7 +58,7 @@ public class DiscoveryNode implements Streamable, Serializable { } else if ("network".equals(nodeMode)) { return false; } else { - throw new ElasticsearchIllegalArgumentException("unsupported node.mode [" + nodeMode + "]. Should be one of [local, network]."); + throw new IllegalArgumentException("unsupported node.mode [" + nodeMode + "]. Should be one of [local, network]."); } } return false; diff --git a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 0a4986476e5..8692e5fb006 100644 --- a/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -24,8 +24,8 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableList; import com.google.common.collect.UnmodifiableIterator; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -45,9 +45,10 @@ import static com.google.common.collect.Lists.newArrayList; * This class holds all {@link DiscoveryNode} in the cluster and provides convenience methods to * access, modify merge / diff discovery nodes. 
 */
-public class DiscoveryNodes implements Iterable<DiscoveryNode> {
+public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements Iterable<DiscoveryNode> {
 
     public static final DiscoveryNodes EMPTY_NODES = builder().build();
+    public static final DiscoveryNodes PROTO = EMPTY_NODES;
 
     private final ImmutableOpenMap<String, DiscoveryNode> nodes;
     private final ImmutableOpenMap<String, DiscoveryNode> dataNodes;
@@ -311,15 +312,15 @@ public class DiscoveryNodes implements Iterable<DiscoveryNode> {
      *
      * @param node id of the node to discover
      * @return discovered node matching the given id
-     * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if more than one node matches the request or no nodes have been resolved
+     * @throws IllegalArgumentException if more than one node matches the request or no nodes have been resolved
      */
     public DiscoveryNode resolveNode(String node) {
         String[] resolvedNodeIds = resolveNodesIds(node);
         if (resolvedNodeIds.length > 1) {
-            throw new ElasticsearchIllegalArgumentException("resolved [" + node + "] into [" + resolvedNodeIds.length + "] nodes, where expected to be resolved to a single node");
+            throw new IllegalArgumentException("resolved [" + node + "] into [" + resolvedNodeIds.length + "] nodes, which was expected to resolve to a single node");
         }
         if (resolvedNodeIds.length == 0) {
-            throw new ElasticsearchIllegalArgumentException("failed to resolve [" + node + " ], no matching nodes");
+            throw new IllegalArgumentException("failed to resolve [" + node + "], no matching nodes");
         }
         return nodes.get(resolvedNodeIds[0]);
     }
@@ -568,6 +569,44 @@ public class DiscoveryNodes implements Iterable<DiscoveryNode> {
         }
     }
 
+    public void writeTo(StreamOutput out) throws IOException {
+        if (masterNodeId == null) {
+            out.writeBoolean(false);
+        } else {
+            out.writeBoolean(true);
+            out.writeString(masterNodeId);
+        }
+        out.writeVInt(nodes.size());
+        for (DiscoveryNode node : this) {
+            node.writeTo(out);
+        }
+    }
+
+    public DiscoveryNodes readFrom(StreamInput in, DiscoveryNode localNode) throws IOException {
+        Builder builder = new Builder();
+        if (in.readBoolean()) {
+            builder.masterNodeId(in.readString());
+        }
+        if (localNode != null) {
+            builder.localNodeId(localNode.id());
+        }
+        int size = in.readVInt();
+        for (int i = 0; i < size; i++) {
+            DiscoveryNode node = DiscoveryNode.readNode(in);
+            if (localNode != null && node.id().equals(localNode.id())) {
+                // reuse the same instance of our address and local node id for faster equality
+                node = localNode;
+            }
+            builder.put(node);
+        }
+        return builder.build();
+    }
+
+    @Override
+    public DiscoveryNodes readFrom(StreamInput in) throws IOException {
+        return readFrom(in, localNode());
+    }
+
     public static Builder builder() {
         return new Builder();
     }
@@ -632,37 +671,8 @@ public class DiscoveryNodes implements Iterable<DiscoveryNode> {
             return new DiscoveryNodes(nodes.build(), dataNodesBuilder.build(), masterNodesBuilder.build(), masterNodeId, localNodeId, minNodeVersion, minNonClientNodeVersion);
         }
 
-        public static void writeTo(DiscoveryNodes nodes, StreamOutput out) throws IOException {
-            if (nodes.masterNodeId() == null) {
-                out.writeBoolean(false);
-            } else {
-                out.writeBoolean(true);
-                out.writeString(nodes.masterNodeId);
-            }
-            out.writeVInt(nodes.size());
-            for (DiscoveryNode node : nodes) {
-                node.writeTo(out);
-            }
-        }
-
-        public static DiscoveryNodes readFrom(StreamInput in, @Nullable DiscoveryNode localNode) throws IOException {
-            Builder builder = new Builder();
-            if (in.readBoolean()) {
-                builder.masterNodeId(in.readString());
-            }
-            if (localNode != null) {
-                builder.localNodeId(localNode.id());
-            }
-            int size = in.readVInt();
-            for (int i = 0; i
< size; i++) { - DiscoveryNode node = DiscoveryNode.readNode(in); - if (localNode != null && node.id().equals(localNode.id())) { - // reuse the same instance of our address and local node id for faster equality - node = localNode; - } - builder.put(node); - } - return builder.build(); + return PROTO.readFrom(in, localNode); } } } diff --git a/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index 5f0356d3572..6aaa260c4b5 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -25,7 +25,7 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.google.common.collect.ImmutableList; import com.google.common.collect.Sets; import com.google.common.collect.UnmodifiableIterator; -import org.elasticsearch.ElasticsearchIllegalStateException; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.collect.ImmutableOpenIntMap; @@ -56,7 +56,9 @@ import static com.google.common.collect.Lists.newArrayList; * represented as {@link ShardRouting}. *

*/ -public class IndexRoutingTable implements Iterable { +public class IndexRoutingTable extends AbstractDiffable implements Iterable { + + public static final IndexRoutingTable PROTO = builder("").build(); private final String index; private final ShardShuffler shuffler; @@ -315,9 +317,51 @@ public class IndexRoutingTable implements Iterable { return new GroupShardsIterator(set); } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + IndexRoutingTable that = (IndexRoutingTable) o; + + if (!index.equals(that.index)) return false; + if (!shards.equals(that.shards)) return false; + + return true; + } + + @Override + public int hashCode() { + int result = index.hashCode(); + result = 31 * result + shards.hashCode(); + return result; + } + public void validate() throws RoutingValidationException { } + @Override + public IndexRoutingTable readFrom(StreamInput in) throws IOException { + String index = in.readString(); + Builder builder = new Builder(index); + + int size = in.readVInt(); + for (int i = 0; i < size; i++) { + builder.addIndexShard(IndexShardRoutingTable.Builder.readFromThin(in, index)); + } + + return builder.build(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(index); + out.writeVInt(shards.size()); + for (IndexShardRoutingTable indexShard : this) { + IndexShardRoutingTable.Builder.writeToThin(indexShard, out); + } + } + public static Builder builder(String index) { return new Builder(index); } @@ -339,30 +383,7 @@ public class IndexRoutingTable implements Iterable { * @throws IOException if something happens during read */ public static IndexRoutingTable readFrom(StreamInput in) throws IOException { - String index = in.readString(); - Builder builder = new Builder(index); - - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - builder.addIndexShard(IndexShardRoutingTable.Builder.readFromThin(in, index)); - } - - return builder.build(); - } - - /** - * Writes an {@link IndexRoutingTable} to a {@link StreamOutput}. - * - * @param index {@link IndexRoutingTable} to write - * @param out {@link StreamOutput} to write to - * @throws IOException if something happens during write - */ - public static void writeTo(IndexRoutingTable index, StreamOutput out) throws IOException { - out.writeString(index.index()); - out.writeVInt(index.shards.size()); - for (IndexShardRoutingTable indexShard : index) { - IndexShardRoutingTable.Builder.writeToThin(indexShard, out); - } + return PROTO.readFrom(in); } /** @@ -398,7 +419,7 @@ public class IndexRoutingTable implements Iterable { */ private Builder initializeAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource, IntSet ignoreShards, boolean asNew) { if (!shards.isEmpty()) { - throw new ElasticsearchIllegalStateException("trying to initialize an index with fresh shards, but already has shards created"); + throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created"); } for (int shardId = 0; shardId < indexMetaData.numberOfShards(); shardId++) { IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(indexMetaData.index(), shardId), asNew ? 
false : true); @@ -420,7 +441,7 @@ public class IndexRoutingTable implements Iterable { */ private Builder initializeEmpty(IndexMetaData indexMetaData, boolean asNew) { if (!shards.isEmpty()) { - throw new ElasticsearchIllegalStateException("trying to initialize an index with fresh shards, but already has shards created"); + throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created"); } for (int shardId = 0; shardId < indexMetaData.numberOfShards(); shardId++) { IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(indexMetaData.index(), shardId), asNew ? false : true); diff --git a/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index 00e50b76129..2371b96f5b0 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -347,6 +347,28 @@ public class IndexShardRoutingTable implements Iterable { return new PlainShardIterator(shardId, ordered); } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + IndexShardRoutingTable that = (IndexShardRoutingTable) o; + + if (primaryAllocatedPostApi != that.primaryAllocatedPostApi) return false; + if (!shardId.equals(that.shardId)) return false; + if (!shards.equals(that.shards)) return false; + + return true; + } + + @Override + public int hashCode() { + int result = shardId.hashCode(); + result = 31 * result + shards.hashCode(); + result = 31 * result + (primaryAllocatedPostApi ? 1 : 0); + return result; + } + /** * Returns true iff all shards in the routing table are started otherwise false */ diff --git a/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 4c4cba24507..ef46b6e8875 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing; import com.google.common.collect.Lists; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -78,25 +77,6 @@ public class OperationRouting extends AbstractComponent { return indexRoutingTable(clusterState, index).groupByShardsIt(); } - public GroupShardsIterator deleteByQueryShards(ClusterState clusterState, String index, @Nullable Set routing) throws IndexMissingException { - if (routing == null || routing.isEmpty()) { - return indexRoutingTable(clusterState, index).groupByShardsIt(); - } - - // we use set here and not identity set since we might get duplicates - HashSet set = new HashSet<>(); - IndexRoutingTable indexRouting = indexRoutingTable(clusterState, index); - for (String r : routing) { - int shardId = shardId(clusterState, index, null, null, r); - IndexShardRoutingTable indexShard = indexRouting.shard(shardId); - if (indexShard == null) { - throw new IndexShardMissingException(new ShardId(index, shardId)); - } - set.add(indexShard.shardsRandomIt()); - } - return new GroupShardsIterator(Lists.newArrayList(set)); - } - public int searchShardsCount(ClusterState clusterState, String[] indices, 
String[] concreteIndices, @Nullable Map> routing, @Nullable String preference) throws IndexMissingException { final Set shards = computeTargetedShards(clusterState, concreteIndices, routing); return shards.size(); @@ -204,7 +184,7 @@ public class OperationRouting extends AbstractComponent { ensureNodeIdExists(nodes, nodeId); return indexShard.onlyNodeActiveInitializingShardsIt(nodeId); default: - throw new ElasticsearchIllegalArgumentException("unknown preference [" + preferenceType + "]"); + throw new IllegalArgumentException("unknown preference [" + preferenceType + "]"); } } // if not, then use it as the index @@ -279,14 +259,14 @@ public class OperationRouting extends AbstractComponent { @Deprecated protected int hash(HashFunction hashFunction, String type, String id) { if (type == null || "_all".equals(type)) { - throw new ElasticsearchIllegalArgumentException("Can't route an operation with no type and having type part of the routing (for backward comp)"); + throw new IllegalArgumentException("Can't route an operation with no type and having type part of the routing (for backward comp)"); } return hashFunction.hash(type, id); } private void ensureNodeIdExists(DiscoveryNodes nodes, String nodeId) { if (!nodes.dataNodes().keys().contains(nodeId)) { - throw new ElasticsearchIllegalArgumentException("No data node with id[" + nodeId + "] found"); + throw new IllegalArgumentException("No data node with id[" + nodeId + "] found"); } } diff --git a/src/main/java/org/elasticsearch/cluster/routing/Preference.java b/src/main/java/org/elasticsearch/cluster/routing/Preference.java index 534b13b9a70..e8842f0de3e 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/Preference.java +++ b/src/main/java/org/elasticsearch/cluster/routing/Preference.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.cluster.routing; -import org.elasticsearch.ElasticsearchIllegalArgumentException; /** * Routing Preference Type @@ -99,7 +98,7 @@ public enum Preference { case "_onlyLocal": return ONLY_LOCAL; default: - throw new ElasticsearchIllegalArgumentException("no Preference for [" + preferenceType + "]"); + throw new IllegalArgumentException("no Preference for [" + preferenceType + "]"); } } } diff --git a/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java b/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java index 6f8341087e7..deb25938f19 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java +++ b/src/main/java/org/elasticsearch/cluster/routing/RoutingNode.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing; import com.google.common.collect.Iterators; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.cluster.node.DiscoveryNode; import java.util.ArrayList; @@ -90,7 +89,7 @@ public class RoutingNode implements Iterable { // TODO use Set with ShardIds for faster lookup. 
        for (MutableShardRouting shardRouting : shards) {
             if (shardRouting.shardId().equals(shard.shardId())) {
-                throw new ElasticsearchIllegalStateException("Trying to add a shard [" + shard.shardId().index().name() + "][" + shard.shardId().id() + "] to a node [" + nodeId + "] where it already exists");
+                throw new IllegalStateException("Trying to add a shard [" + shard.shardId().index().name() + "][" + shard.shardId().id() + "] to a node [" + nodeId + "] where it already exists");
             }
         }
         shards.add(shard);
diff --git a/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java b/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java
index 21378f22b99..33804559e4d 100644
--- a/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java
+++ b/src/main/java/org/elasticsearch/cluster/routing/RoutingService.java
@@ -75,15 +75,15 @@ public class RoutingService extends AbstractLifecycleComponent<RoutingService> i
     }
 
     @Override
-    protected void doStart() throws ElasticsearchException {
+    protected void doStart() {
     }
 
     @Override
-    protected void doStop() throws ElasticsearchException {
+    protected void doStop() {
     }
 
     @Override
-    protected void doClose() throws ElasticsearchException {
+    protected void doClose() {
         FutureUtils.cancel(scheduledRoutingTableFuture);
         scheduledRoutingTableFuture = null;
         clusterService.remove(this);
diff --git a/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java b/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
index 942d512eac8..a42a33ee0b7 100644
--- a/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
+++ b/src/main/java/org/elasticsearch/cluster/routing/RoutingTable.java
@@ -21,7 +21,7 @@ package org.elasticsearch.cluster.routing;
 
 import com.carrotsearch.hppc.IntSet;
 import com.google.common.collect.*;
-import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.*;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -44,7 +44,9 @@ import static com.google.common.collect.Maps.newHashMap;
  *
  * @see IndexRoutingTable
  */
-public class RoutingTable implements Iterable<IndexRoutingTable> {
+public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<RoutingTable> {
+
+    public static final RoutingTable PROTO = builder().build();
 
     public static final RoutingTable EMPTY_ROUTING_TABLE = builder().build();
 
@@ -254,6 +256,66 @@ public class RoutingTable implements Iterable<IndexRoutingTable> {
         return new GroupShardsIterator(set);
     }
 
+    @Override
+    public Diff<RoutingTable> diff(RoutingTable previousState) {
+        return new RoutingTableDiff(previousState, this);
+    }
+
+    @Override
+    public Diff<RoutingTable> readDiffFrom(StreamInput in) throws IOException {
+        return new RoutingTableDiff(in);
+    }
+
+    @Override
+    public RoutingTable readFrom(StreamInput in) throws IOException {
+        Builder builder = new Builder();
+        builder.version = in.readLong();
+        int size = in.readVInt();
+        for (int i = 0; i < size; i++) {
+            IndexRoutingTable index = IndexRoutingTable.Builder.readFrom(in);
+            builder.add(index);
+        }
+
+        return builder.build();
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeLong(version);
+        out.writeVInt(indicesRouting.size());
+        for (IndexRoutingTable index : indicesRouting.values()) {
+            index.writeTo(out);
+        }
+    }
+
+    private static class RoutingTableDiff implements Diff<RoutingTable> {
+
+        private final long version;
+
+        private final Diff<ImmutableMap<String, IndexRoutingTable>> indicesRouting;
+
+        public RoutingTableDiff(RoutingTable before, RoutingTable after) {
+            version = after.version;
+            indicesRouting = DiffableUtils.diff(before.indicesRouting, after.indicesRouting);
+        }
+
+        public RoutingTableDiff(StreamInput in) throws IOException {
+            version = in.readLong();
+            indicesRouting = DiffableUtils.readImmutableMapDiff(in, IndexRoutingTable.PROTO);
+        }
+
+        @Override
+        public RoutingTable apply(RoutingTable part) {
+            return new RoutingTable(version, indicesRouting.apply(part.indicesRouting));
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeLong(version);
+            indicesRouting.writeTo(out);
+        }
+    }
+
     public static Builder builder() {
         return new Builder();
     }
@@ -403,6 +465,11 @@ public class RoutingTable implements Iterable<IndexRoutingTable> {
             return this;
         }
 
+        public Builder indicesRouting(ImmutableMap<String, IndexRoutingTable> indicesRouting) {
+            this.indicesRouting.putAll(indicesRouting);
+            return this;
+        }
+
         public Builder remove(String index) {
             indicesRouting.remove(index);
             return this;
@@ -422,23 +489,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable> {
         }
 
         public static RoutingTable readFrom(StreamInput in) throws IOException {
-            Builder builder = new Builder();
-            builder.version = in.readLong();
-            int size = in.readVInt();
-            for (int i = 0; i < size; i++) {
-                IndexRoutingTable index = IndexRoutingTable.Builder.readFrom(in);
-                builder.add(index);
-            }
-
-            return builder.build();
-        }
-
-        public static void writeTo(RoutingTable table, StreamOutput out) throws IOException {
-            out.writeLong(table.version);
-            out.writeVInt(table.indicesRouting.size());
-            for (IndexRoutingTable index : table.indicesRouting.values()) {
-                IndexRoutingTable.Builder.writeTo(index, out);
-            }
+            return PROTO.readFrom(in);
         }
     }
 
@@ -450,5 +501,4 @@ public class RoutingTable implements Iterable<IndexRoutingTable> {
         return sb.toString();
     }
 
-
 }
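The RoutingTable change above is the heart of this hunk: routing tables become Diffable, the master publishes a Diff computed against the previous cluster state, and receivers rebuild the new state by applying it to what they already have; deserialization hangs off a shared PROTO instance instead of static readFrom/writeTo helpers. A minimal, self-contained sketch of that diff/apply round trip, using simplified stand-in types (Diff, Named, DiffDemo are illustrative, not the real Elasticsearch classes):

// Minimal sketch of the diff/apply contract introduced above.
interface Diff<T> {
    T apply(T part); // reconstructs the new state from the previous one
}

final class Named {
    final String name;

    Named(String name) {
        this.name = name;
    }

    // Mirrors RoutingTable.diff(previousState): capture only what changed.
    Diff<Named> diff(Named previous) {
        if (previous.name.equals(name)) {
            return part -> part; // unchanged: the receiver keeps its old instance
        }
        String changed = name;
        return part -> new Named(changed); // changed: ship just the new value
    }
}

public class DiffDemo {
    public static void main(String[] args) {
        Named before = new Named("routing-v1");
        Named after = new Named("routing-v2");
        Diff<Named> diff = after.diff(before);    // computed where the change happened
        Named reconstructed = diff.apply(before); // applied against the previous state
        System.out.println(reconstructed.name);   // prints: routing-v2
    }
}

In the real classes the diff is additionally stream-serializable (writeTo plus the readDiffFrom counterpart), and PROTO exists only so deserialization can be an instance method on a shared empty prototype.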
diff --git a/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingState.java b/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingState.java
index 7bea17c3c69..b36e1fcc88a 100644
--- a/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingState.java
+++ b/src/main/java/org/elasticsearch/cluster/routing/ShardRoutingState.java
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.cluster.routing;
 
-import org.elasticsearch.ElasticsearchIllegalStateException;
 
 /**
  * Represents the current state of a {@link ShardRouting} as defined by the
@@ -69,7 +68,7 @@ public enum ShardRoutingState {
             case 4:
                 return RELOCATING;
             default:
-                throw new ElasticsearchIllegalStateException("No routing state mapped for [" + value + "]");
+                throw new IllegalStateException("No routing state mapped for [" + value + "]");
         }
     }
 }
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
index 58f3ae58f50..8e6b68ecf78 100644
--- a/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java
@@ -21,10 +21,8 @@ package org.elasticsearch.cluster.routing.allocation;
 
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchIllegalStateException;
 import org.elasticsearch.cluster.ClusterInfoService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -120,7 +118,7 @@ public class AllocationService extends AbstractComponent {
         return
reroute(clusterState, commands, false); } - public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain) throws ElasticsearchException { + public RoutingAllocation.Result reroute(ClusterState clusterState, AllocationCommands commands, boolean explain) { RoutingNodes routingNodes = clusterState.routingNodes(); // we don't shuffle the unassigned shards here, to try and get as close as possible to // a consistent result of the effect the commands have on the routing @@ -501,7 +499,7 @@ public class AllocationService extends AbstractComponent { logger.debug("failed shard {} not found in routingNodes, ignoring it", failedShard); } } else { - throw new ElasticsearchIllegalStateException("illegal state for a failed shard, relocating node id is set, but state does not match: " + failedShard); + throw new IllegalStateException("illegal state for a failed shard, relocating node id is set, but state does not match: " + failedShard); } } else { // the shard is not relocating, its either started, or initializing, just cancel it and move on... diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java index 6dde296c186..11f3e45653f 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/BalancedShardsAllocator.java @@ -22,7 +22,6 @@ package org.elasticsearch.cluster.routing.allocation.allocator; import com.google.common.base.Predicate; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.IntroSorter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.MutableShardRouting; import org.elasticsearch.cluster.routing.RoutingNode; @@ -78,7 +77,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards final float shardBalance = settings.getAsFloat(SETTING_SHARD_BALANCE_FACTOR, weightFunction.shardBalance); float threshold = settings.getAsFloat(SETTING_THRESHOLD, BalancedShardsAllocator.this.threshold); if (threshold <= 0.0f) { - throw new ElasticsearchIllegalArgumentException("threshold must be greater than 0.0f but was: " + threshold); + throw new IllegalArgumentException("threshold must be greater than 0.0f but was: " + threshold); } BalancedShardsAllocator.this.threshold = threshold; BalancedShardsAllocator.this.weightFunction = new WeightFunction(indexBalance, shardBalance); @@ -180,7 +179,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards public WeightFunction(float indexBalance, float shardBalance) { float sum = indexBalance + shardBalance; if (sum <= 0.0f) { - throw new ElasticsearchIllegalArgumentException("Balance factors must sum to a value > 0 but was: " + sum); + throw new IllegalArgumentException("Balance factors must sum to a value > 0 but was: " + sum); } theta = new float[]{shardBalance / sum, indexBalance / sum}; this.indexBalance = indexBalance; diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java index 0bbb7e750da..18e729aba89 100644 --- 
a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateAllocationCommand.java @@ -20,8 +20,6 @@ package org.elasticsearch.cluster.routing.allocation.command; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.MutableShardRouting; @@ -167,7 +165,7 @@ public class AllocateAllocationCommand implements AllocationCommand { } @Override - public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) throws ElasticsearchException { + public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) { DiscoveryNode discoNode = allocation.nodes().resolveNode(node); MutableShardRouting shardRouting = null; @@ -185,7 +183,7 @@ public class AllocateAllocationCommand implements AllocationCommand { return new RerouteExplanation(this, allocation.decision(Decision.NO, "allocate_allocation_command", "failed to find " + shardId + " on the list of unassigned shards")); } - throw new ElasticsearchIllegalArgumentException("[allocate] failed to find " + shardId + " on the list of unassigned shards"); + throw new IllegalArgumentException("[allocate] failed to find " + shardId + " on the list of unassigned shards"); } if (shardRouting.primary() && !allowPrimary) { @@ -193,7 +191,7 @@ public class AllocateAllocationCommand implements AllocationCommand { return new RerouteExplanation(this, allocation.decision(Decision.NO, "allocate_allocation_command", "trying to allocate a primary shard " + shardId + ", which is disabled")); } - throw new ElasticsearchIllegalArgumentException("[allocate] trying to allocate a primary shard " + shardId + ", which is disabled"); + throw new IllegalArgumentException("[allocate] trying to allocate a primary shard " + shardId + ", which is disabled"); } RoutingNode routingNode = allocation.routingNodes().node(discoNode.id()); @@ -203,13 +201,13 @@ public class AllocateAllocationCommand implements AllocationCommand { return new RerouteExplanation(this, allocation.decision(Decision.NO, "allocate_allocation_command", "Allocation can only be done on data nodes, not [" + node + "]")); } - throw new ElasticsearchIllegalArgumentException("Allocation can only be done on data nodes, not [" + node + "]"); + throw new IllegalArgumentException("Allocation can only be done on data nodes, not [" + node + "]"); } else { if (explain) { return new RerouteExplanation(this, allocation.decision(Decision.NO, "allocate_allocation_command", "Could not find [" + node + "] among the routing nodes")); } - throw new ElasticsearchIllegalStateException("Could not find [" + node + "] among the routing nodes"); + throw new IllegalStateException("Could not find [" + node + "] among the routing nodes"); } } @@ -218,7 +216,7 @@ public class AllocateAllocationCommand implements AllocationCommand { if (explain) { return new RerouteExplanation(this, decision); } - throw new ElasticsearchIllegalArgumentException("[allocate] allocation of " + shardId + " on node " + discoNode + " is not allowed, reason: " + decision); + throw new IllegalArgumentException("[allocate] allocation of " + shardId + " on node " + discoNode + " is not allowed, reason: " + decision); } // go over and remove it from the 
unassigned for (Iterator it = allocation.routingNodes().unassigned().iterator(); it.hasNext(); ) { diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java index d53151c5dbd..1e835dc4039 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommand.java @@ -88,5 +88,5 @@ public interface AllocationCommand { * @param allocation {@link RoutingAllocation} to modify * @throws org.elasticsearch.ElasticsearchException if something happens during reconfiguration */ - RerouteExplanation execute(RoutingAllocation allocation, boolean explain) throws ElasticsearchException; + RerouteExplanation execute(RoutingAllocation allocation, boolean explain); } diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java index 40a75cdfd52..db41a759d35 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java @@ -19,8 +19,6 @@ package org.elasticsearch.cluster.routing.allocation.command; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.routing.allocation.RoutingExplanations; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; @@ -61,10 +59,10 @@ public class AllocationCommands { } @SuppressWarnings("unchecked") - public static AllocationCommand.Factory lookupFactorySafe(String name) throws ElasticsearchIllegalArgumentException { + public static AllocationCommand.Factory lookupFactorySafe(String name) { AllocationCommand.Factory factory = factories.get(name); if (factory == null) { - throw new ElasticsearchIllegalArgumentException("No allocation command factory registered for name [" + name + "]"); + throw new IllegalArgumentException("No allocation command factory registered for name [" + name + "]"); } return factory; } @@ -113,7 +111,7 @@ public class AllocationCommands { * @param allocation {@link RoutingAllocation} to apply this command to * @throws org.elasticsearch.ElasticsearchException if something happens during execution */ - public RoutingExplanations execute(RoutingAllocation allocation, boolean explain) throws ElasticsearchException { + public RoutingExplanations execute(RoutingAllocation allocation, boolean explain) { RoutingExplanations explanations = new RoutingExplanations(); for (AllocationCommand command : commands) { explanations.add(command.execute(allocation, explain)); diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java index be01ccb6190..5d7a93635b4 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/CancelAllocationCommand.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation.command; import org.elasticsearch.ElasticsearchException; -import 
org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.MutableShardRouting;
@@ -165,7 +164,7 @@ public class CancelAllocationCommand implements AllocationCommand {
     }
 
     @Override
-    public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) throws ElasticsearchException {
+    public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
         DiscoveryNode discoNode = allocation.nodes().resolveNode(node);
         boolean found = false;
         for (RoutingNodes.RoutingNodeIterator it = allocation.routingNodes().routingNodeIter(discoNode.id()); it.hasNext(); ) {
@@ -197,7 +196,7 @@ public class CancelAllocationCommand implements AllocationCommand {
                         return new RerouteExplanation(this, allocation.decision(Decision.NO, "cancel_allocation_command",
                                 "can't cancel " + shardId + " on node " + discoNode + ", shard is primary and initializing its state"));
                     }
-                    throw new ElasticsearchIllegalArgumentException("[cancel_allocation] can't cancel " + shardId + " on node " +
+                    throw new IllegalArgumentException("[cancel_allocation] can't cancel " + shardId + " on node " +
                             discoNode + ", shard is primary and initializing its state");
                 }
                 it.moveToUnassigned();
@@ -220,7 +219,7 @@ public class CancelAllocationCommand implements AllocationCommand {
                         return new RerouteExplanation(this, allocation.decision(Decision.NO, "cancel_allocation_command",
                                 "can't cancel " + shardId + " on node " + discoNode + ", shard is primary and started"));
                     }
-                    throw new ElasticsearchIllegalArgumentException("[cancel_allocation] can't cancel " + shardId + " on node " +
+                    throw new IllegalArgumentException("[cancel_allocation] can't cancel " + shardId + " on node " +
                             discoNode + ", shard is primary and started");
                 }
                 it.remove();
@@ -233,7 +232,7 @@ public class CancelAllocationCommand implements AllocationCommand {
                 return new RerouteExplanation(this, allocation.decision(Decision.NO, "cancel_allocation_command",
                         "can't cancel " + shardId + ", failed to find it on node " + discoNode));
             }
-            throw new ElasticsearchIllegalArgumentException("[cancel_allocation] can't cancel " + shardId + ", failed to find it on node " + discoNode);
+            throw new IllegalArgumentException("[cancel_allocation] can't cancel " + shardId + ", failed to find it on node " + discoNode);
         }
         return new RerouteExplanation(this, allocation.decision(Decision.YES, "cancel_allocation_command",
                 "shard " + shardId + " on node " + discoNode + " can be cancelled"));
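CancelAllocationCommand above and the MoveAllocationCommand that follows share the same execute(allocation, explain) contract: with explain set they return a RerouteExplanation instead of throwing, and without it bad input now surfaces as plain JDK exceptions. A rough sketch of driving the command family programmatically; the constructor argument shapes here are assumptions for illustration, not verified against this exact revision:

// Illustrative sketch only: wiring allocation commands through the
// reroute(state, commands, explain) signature as refactored in this patch.
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.command.CancelAllocationCommand;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.index.shard.ShardId;

public class RerouteSketch {

    public static RoutingAllocation.Result moveAndCancel(AllocationService allocationService, ClusterState state) {
        AllocationCommands commands = new AllocationCommands(
                new MoveAllocationCommand(new ShardId("test", 0), "node1", "node2"),   // assumed ctor shape
                new CancelAllocationCommand(new ShardId("test", 1), "node3", false));  // assumed ctor shape
        // explain=true collects RoutingExplanations rather than failing hard;
        // explain=false now throws IllegalArgumentException/IllegalStateException
        // instead of the Elasticsearch-specific variants removed by this patch.
        return allocationService.reroute(state, commands, true);
    }
}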
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java
index 0a52f6cc6f4..185e74443e1 100644
--- a/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/command/MoveAllocationCommand.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.cluster.routing.allocation.command;
 
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.MutableShardRouting;
@@ -147,7 +146,7 @@ public class MoveAllocationCommand implements AllocationCommand {
     }
 
     @Override
-    public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) throws ElasticsearchException {
+    public RerouteExplanation execute(RoutingAllocation allocation, boolean explain) {
         DiscoveryNode fromDiscoNode = allocation.nodes().resolveNode(fromNode);
         DiscoveryNode toDiscoNode = allocation.nodes().resolveNode(toNode);
         Decision decision = null;
@@ -165,7 +164,7 @@ public class MoveAllocationCommand implements AllocationCommand {
                 return new RerouteExplanation(this, allocation.decision(Decision.NO, "move_allocation_command",
                         "shard " + shardId + " has not been started"));
             }
-            throw new ElasticsearchIllegalArgumentException("[move_allocation] can't move " + shardId +
+            throw new IllegalArgumentException("[move_allocation] can't move " + shardId +
                     ", shard is not started (state = " + shardRouting.state() + "]");
         }
 
@@ -175,7 +174,7 @@ public class MoveAllocationCommand implements AllocationCommand {
             if (explain) {
                 return new RerouteExplanation(this, decision);
             }
-            throw new ElasticsearchIllegalArgumentException("[move_allocation] can't move " + shardId + ", from " + fromDiscoNode + ", to " + toDiscoNode + ", since its not allowed, reason: " + decision);
+            throw new IllegalArgumentException("[move_allocation] can't move " + shardId + ", from " + fromDiscoNode + ", to " + toDiscoNode + ", since it's not allowed, reason: " + decision);
         }
         if (decision.type() == Decision.Type.THROTTLE) {
             // its being throttled, maybe have a flag to take it into account and fail? for now, just do it since the "user" wants it...
@@ -193,7 +192,7 @@ public class MoveAllocationCommand implements AllocationCommand {
                 return new RerouteExplanation(this, allocation.decision(Decision.NO, "move_allocation_command",
                         "shard " + shardId + " not found"));
             }
-            throw new ElasticsearchIllegalArgumentException("[move_allocation] can't move " + shardId + ", failed to find it on node " + fromDiscoNode);
+            throw new IllegalArgumentException("[move_allocation] can't move " + shardId + ", failed to find it on node " + fromDiscoNode);
         }
         return new RerouteExplanation(this, decision);
     }
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
index f0480c4af7c..b057307cafb 100644
--- a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ClusterRebalanceAllocationDecider.java
@@ -19,8 +19,6 @@
 
 package org.elasticsearch.cluster.routing.allocation.decider;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
-import org.elasticsearch.ElasticsearchIllegalStateException;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.cluster.settings.Validator;
@@ -58,7 +56,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
             try {
                 ClusterRebalanceType.parseString(value);
                 return null;
-            } catch (ElasticsearchIllegalArgumentException e) {
+            } catch (IllegalArgumentException e) {
                 return "the value of " + setting + " must be one of: [always, indices_primaries_active, indices_all_active]";
             }
         }
@@ -89,7 +87,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider {
             } else if ("indices_all_active".equalsIgnoreCase(typeString) || "indicesAllActive".equalsIgnoreCase(typeString)) {
                 return ClusterRebalanceType.INDICES_ALL_ACTIVE;
             }
-            throw new
ElasticsearchIllegalArgumentException("Illegal value for " + CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE + ": " + typeString); + throw new IllegalArgumentException("Illegal value for " + CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE + ": " + typeString); } } @@ -101,7 +99,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider { String allowRebalance = settings.get(CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, "indices_all_active"); try { type = ClusterRebalanceType.parseString(allowRebalance); - } catch (ElasticsearchIllegalStateException e) { + } catch (IllegalStateException e) { logger.warn("[{}] has a wrong value {}, defaulting to 'indices_all_active'", CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE, allowRebalance); type = ClusterRebalanceType.INDICES_ALL_ACTIVE; } @@ -119,7 +117,7 @@ public class ClusterRebalanceAllocationDecider extends AllocationDecider { ClusterRebalanceType newType = null; try { newType = ClusterRebalanceType.parseString(newAllowRebalance); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { // ignore } diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java index 4f05fd196fb..76922ae2462 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/Decision.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation.decider; import com.google.common.collect.Lists; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ToXContent; @@ -117,7 +116,7 @@ public abstract class Decision implements ToXContent { case 2: return THROTTLE; default: - throw new ElasticsearchIllegalArgumentException("No Type for integer [" + i + "]"); + throw new IllegalArgumentException("No Type for integer [" + i + "]"); } } @@ -133,7 +132,7 @@ public abstract class Decision implements ToXContent { out.writeVInt(2); break; default: - throw new ElasticsearchIllegalArgumentException("Invalid Type [" + type + "]"); + throw new IllegalArgumentException("Invalid Type [" + type + "]"); } } } diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 726a588d1bf..a3969dcc232 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -142,19 +142,19 @@ public class DiskThresholdDecider extends AllocationDecider { private void warnAboutDiskIfNeeded(DiskUsage usage) { // Check absolute disk values if (usage.getFreeBytes() < DiskThresholdDecider.this.freeBytesThresholdHigh.bytes()) { - logger.warn("high disk watermark [{}] exceeded on {}, shards will be relocated away from this node", + logger.warn("high disk watermark [{} free] exceeded on {}, shards will be relocated away from this node", DiskThresholdDecider.this.freeBytesThresholdHigh, usage); } else if (usage.getFreeBytes() < DiskThresholdDecider.this.freeBytesThresholdLow.bytes()) { - logger.info("low disk watermark [{}] exceeded on {}, replicas will not be assigned to this node", + logger.info("low disk 
watermark [{} free] exceeded on {}, replicas will not be assigned to this node", DiskThresholdDecider.this.freeBytesThresholdLow, usage); } // Check percentage disk values if (usage.getFreeDiskAsPercentage() < DiskThresholdDecider.this.freeDiskThresholdHigh) { - logger.warn("high disk watermark [{}] exceeded on {}, shards will be relocated away from this node", + logger.warn("high disk watermark [{} free] exceeded on {}, shards will be relocated away from this node", Strings.format1Decimals(DiskThresholdDecider.this.freeDiskThresholdHigh, "%"), usage); } else if (usage.getFreeDiskAsPercentage() < DiskThresholdDecider.this.freeDiskThresholdLow) { - logger.info("low disk watermark [{}] exceeded on {}, replicas will not be assigned to this node", + logger.info("low disk watermark [{} free] exceeded on {}, replicas will not be assigned to this node", Strings.format1Decimals(DiskThresholdDecider.this.freeDiskThresholdLow, "%"), usage); } } diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java index bcbc1231e4b..7546482d87a 100644 --- a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java +++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/EnableAllocationDecider.java @@ -19,8 +19,6 @@ package org.elasticsearch.cluster.routing.allocation.decider; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; @@ -112,7 +110,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe return allocation.decision(Decision.NO, NAME, "replica allocations are forbidden"); } default: - throw new ElasticsearchIllegalStateException("Unknown allocation option"); + throw new IllegalStateException("Unknown allocation option"); } } @@ -148,7 +146,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe return allocation.decision(Decision.NO, NAME, "primary rebalancing is forbidden"); } default: - throw new ElasticsearchIllegalStateException("Unknown rebalance option"); + throw new IllegalStateException("Unknown rebalance option"); } } @@ -188,7 +186,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe try { return Allocation.valueOf(strValue); } catch (IllegalArgumentException e) { - throw new ElasticsearchIllegalArgumentException("Illegal allocation.enable value [" + strValue + "]"); + throw new IllegalArgumentException("Illegal allocation.enable value [" + strValue + "]"); } } } @@ -214,7 +212,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe try { return Rebalance.valueOf(strValue); } catch (IllegalArgumentException e) { - throw new ElasticsearchIllegalArgumentException("Illegal rebalance.enable value [" + strValue + "]"); + throw new IllegalArgumentException("Illegal rebalance.enable value [" + strValue + "]"); } } } diff --git a/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java b/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java index ea6866c420f..a5b7470bfa1 100644 --- a/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java +++ 
b/src/main/java/org/elasticsearch/cluster/service/InternalClusterService.java @@ -20,8 +20,6 @@ package org.elasticsearch.cluster.service; import com.google.common.collect.Iterables; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.ClusterState.Builder; @@ -60,6 +58,9 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF */ public class InternalClusterService extends AbstractLifecycleComponent implements ClusterService { + public static final String SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD = "cluster.service.slow_task_logging_threshold"; + public static final String SETTING_CLUSTER_SERVICE_RECONNECT_INTERVAL = "cluster.service.reconnect_interval"; + public static final String UPDATE_THREAD_NAME = "clusterService#updateTask"; private final ThreadPool threadPool; @@ -75,6 +76,8 @@ public class InternalClusterService extends AbstractLifecycleComponent slowTaskLoggingThreshold.getMillis()) { + logger.warn("cluster state update task [{}] took {} above the warn threshold of {}", source, executionTime, slowTaskLoggingThreshold); + } + } + class NotifyTimeout implements Runnable { final TimeoutClusterStateListener listener; final TimeValue timeout; @@ -754,4 +776,13 @@ public class InternalClusterService extends AbstractLifecycleComponent - * if (count <= 0) { - * throw new ElasticsearchIllegalArgumentException("must be positive: " + count); - * } - * - * to be replaced with the more compact - *
- *     checkArgument(count > 0, "must be positive: %s", count);
- * - * Note that the sense of the expression is inverted; with {@code Preconditions} - * you declare what you expect to be true, just as you do with an - * - * {@code assert} or a JUnit {@code assertTrue()} call. - * - *

- * Take care not to confuse precondition checking with other similar types
- * of checks! Precondition exceptions -- including those provided here, but also
- * {@link IndexOutOfBoundsException}, {@link NoSuchElementException}, {@link
- * UnsupportedOperationException} and others -- are used to signal that the
- * calling method has made an error. This tells the caller that it should
- * not have invoked the method when it did, with the arguments it did, or
- * perhaps ever. Postcondition or other invariant failures should not
- * throw these types of exceptions.
- *
- *

Note: The methods of the {@code Preconditions} class are highly - * unusual in one way: they are supposed to throw exceptions, and promise - * in their specifications to do so even when given perfectly valid input. That - * is, {@code null} is a valid parameter to the method {@link - * #checkNotNull(Object)} -- and technically this parameter could be even marked - * as Nullable -- yet the method will still throw an exception anyway, - * because that's what its contract says to do. - * - * - */ -public final class Preconditions { - private Preconditions() { - } - - /** - * Ensures the truth of an expression involving one or more parameters to the - * calling method. - * - * @param expression a boolean expression - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException - * if {@code expression} is false - */ - public static void checkArgument(boolean expression) { - if (!expression) { - throw new ElasticsearchIllegalArgumentException(); - } - } - - /** - * Ensures the truth of an expression involving one or more parameters to the - * calling method. - * - * @param expression a boolean expression - * @param errorMessage the exception message to use if the check fails; will - * be converted to a string using {@link String#valueOf(Object)} - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException - * if {@code expression} is false - */ - public static void checkArgument(boolean expression, Object errorMessage) { - if (!expression) { - throw new ElasticsearchIllegalArgumentException(String.valueOf(errorMessage)); - } - } - - /** - * Ensures the truth of an expression involving one or more parameters to the - * calling method. - * - * @param expression a boolean expression - * @param errorMessageTemplate a template for the exception message should the - * check fail. The message is formed by replacing each {@code %s} - * placeholder in the template with an argument. These are matched by - * position - the first {@code %s} gets {@code errorMessageArgs[0]}, etc. - * Unmatched arguments will be appended to the formatted message in square - * braces. Unmatched placeholders will be left as-is. - * @param errorMessageArgs the arguments to be substituted into the message - * template. Arguments are converted to strings using - * {@link String#valueOf(Object)}. - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException - * if {@code expression} is false - * @throws org.elasticsearch.ElasticsearchNullPointerException - * if the check fails and either {@code - * errorMessageTemplate} or {@code errorMessageArgs} is null (don't let - * this happen) - */ - public static void checkArgument(boolean expression, - String errorMessageTemplate, Object... errorMessageArgs) { - if (!expression) { - throw new ElasticsearchIllegalArgumentException( - format(errorMessageTemplate, errorMessageArgs)); - } - } - - /** - * Ensures the truth of an expression involving the state of the calling - * instance, but not involving any parameters to the calling method. - * - * @param expression a boolean expression - * @throws org.elasticsearch.ElasticsearchIllegalStateException - * if {@code expression} is false - */ - public static void checkState(boolean expression) { - if (!expression) { - throw new ElasticsearchIllegalStateException(); - } - } - - /** - * Ensures the truth of an expression involving the state of the calling - * instance, but not involving any parameters to the calling method. 
- * - * @param expression a boolean expression - * @param errorMessage the exception message to use if the check fails; will - * be converted to a string using {@link String#valueOf(Object)} - * @throws org.elasticsearch.ElasticsearchIllegalStateException - * if {@code expression} is false - */ - public static void checkState(boolean expression, Object errorMessage) { - if (!expression) { - throw new ElasticsearchIllegalStateException(String.valueOf(errorMessage)); - } - } - - /** - * Ensures the truth of an expression involving the state of the calling - * instance, but not involving any parameters to the calling method. - * - * @param expression a boolean expression - * @param errorMessageTemplate a template for the exception message should the - * check fail. The message is formed by replacing each {@code %s} - * placeholder in the template with an argument. These are matched by - * position - the first {@code %s} gets {@code errorMessageArgs[0]}, etc. - * Unmatched arguments will be appended to the formatted message in square - * braces. Unmatched placeholders will be left as-is. - * @param errorMessageArgs the arguments to be substituted into the message - * template. Arguments are converted to strings using - * {@link String#valueOf(Object)}. - * @throws org.elasticsearch.ElasticsearchIllegalStateException - * if {@code expression} is false - * @throws org.elasticsearch.ElasticsearchNullPointerException - * if the check fails and either {@code - * errorMessageTemplate} or {@code errorMessageArgs} is null (don't let - * this happen) - */ - public static void checkState(boolean expression, - String errorMessageTemplate, Object... errorMessageArgs) { - if (!expression) { - throw new ElasticsearchIllegalStateException( - format(errorMessageTemplate, errorMessageArgs)); - } - } - - /** - * Ensures that an object reference passed as a parameter to the calling - * method is not null. - * - * @param reference an object reference - * @return the non-null reference that was validated - * @throws org.elasticsearch.ElasticsearchNullPointerException - * if {@code reference} is null - */ - public static T checkNotNull(T reference) { - if (reference == null) { - throw new ElasticsearchNullPointerException(); - } - return reference; - } - - /** - * Ensures that an object reference passed as a parameter to the calling - * method is not null. - * - * @param reference an object reference - * @param errorMessage the exception message to use if the check fails; will - * be converted to a string using {@link String#valueOf(Object)} - * @return the non-null reference that was validated - * @throws org.elasticsearch.ElasticsearchNullPointerException - * if {@code reference} is null - */ - public static T checkNotNull(T reference, Object errorMessage) { - if (reference == null) { - throw new ElasticsearchNullPointerException(String.valueOf(errorMessage)); - } - return reference; - } - - /** - * Ensures that an object reference passed as a parameter to the calling - * method is not null. - * - * @param reference an object reference - * @param errorMessageTemplate a template for the exception message should the - * check fail. The message is formed by replacing each {@code %s} - * placeholder in the template with an argument. These are matched by - * position - the first {@code %s} gets {@code errorMessageArgs[0]}, etc. - * Unmatched arguments will be appended to the formatted message in square - * braces. Unmatched placeholders will be left as-is. 
-     * @param errorMessageArgs the arguments to be substituted into the message
-     *                         template. Arguments are converted to strings using
-     *                         {@link String#valueOf(Object)}.
-     * @return the non-null reference that was validated
-     * @throws org.elasticsearch.ElasticsearchNullPointerException
-     *          if {@code reference} is null
-     */
-    public static <T> T checkNotNull(T reference, String errorMessageTemplate,
-                                     Object... errorMessageArgs) {
-        if (reference == null) {
-            // If either of these parameters is null, the right thing happens anyway
-            throw new ElasticsearchNullPointerException(
-                    format(errorMessageTemplate, errorMessageArgs));
-        }
-        return reference;
-    }
-
-    /**
-     * Substitutes each {@code %s} in {@code template} with an argument. These
-     * are matched by position - the first {@code %s} gets {@code args[0]}, etc.
-     * If there are more arguments than placeholders, the unmatched arguments will
-     * be appended to the end of the formatted message in square braces.
-     *
-     * @param template a non-null string containing 0 or more {@code %s}
-     *                 placeholders.
-     * @param args     the arguments to be substituted into the message
-     *                 template. Arguments are converted to strings using
-     *                 {@link String#valueOf(Object)}. Arguments can be null.
-     */
-    // VisibleForTesting
-    static String format(String template, Object... args) {
-        // start substituting the arguments into the '%s' placeholders
-        StringBuilder builder = new StringBuilder(
-                template.length() + 16 * args.length);
-        int templateStart = 0;
-        int i = 0;
-        while (i < args.length) {
-            int placeholderStart = template.indexOf("%s", templateStart);
-            if (placeholderStart == -1) {
-                break;
-            }
-            builder.append(template.substring(templateStart, placeholderStart));
-            builder.append(args[i++]);
-            templateStart = placeholderStart + 2;
-        }
-        builder.append(template.substring(templateStart));
-
-        // if we run out of placeholders, append the extra args in square braces
-        if (i < args.length) {
-            builder.append(" [");
-            builder.append(args[i++]);
-            while (i < args.length) {
-                builder.append(", ");
-                builder.append(args[i++]);
-            }
-            builder.append("]");
-        }
-
-        return builder.toString();
-    }
-}
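With org.elasticsearch.common.Preconditions deleted outright, call sites that want the same compact checks can lean on Guava's com.google.common.base.Preconditions (Guava is already a dependency throughout this patch), which throws the plain JDK exceptions this changeset standardizes on. A small illustrative migration sketch:

// Illustrative sketch: Guava's Preconditions provides the same compact checks
// and throws the JDK exceptions this changeset standardizes on.
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;

public class PreconditionsMigration {

    static void setCount(int count, Object ref) {
        checkArgument(count > 0, "must be positive: %s", count); // IllegalArgumentException on failure
        checkNotNull(ref, "ref must not be null");               // NullPointerException on failure
    }

    public static void main(String[] args) {
        setCount(1, "ok");
        try {
            setCount(-1, "boom");
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // prints: must be positive: -1
        }
    }
}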
implements UUIDGenerator { assert encoded[encoded.length - 2] == '='; return new String(encoded, 0, encoded.length - 2, Base64.PREFERRED_ENCODING); } catch (IOException e) { - throw new ElasticsearchIllegalStateException("should not be thrown"); + throw new IllegalStateException("should not be thrown"); } } } diff --git a/src/main/java/org/elasticsearch/common/Strings.java b/src/main/java/org/elasticsearch/common/Strings.java index 99e6598d063..7aff86bc2cd 100644 --- a/src/main/java/org/elasticsearch/common/Strings.java +++ b/src/main/java/org/elasticsearch/common/Strings.java @@ -21,13 +21,29 @@ package org.elasticsearch.common; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; + import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.FastStringReader; import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.json.JsonXContent; import java.io.BufferedReader; -import java.util.*; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Properties; +import java.util.Random; +import java.util.Set; +import java.util.StringTokenizer; +import java.util.TreeSet; /** * @@ -1063,4 +1079,18 @@ public class Strings { public static String base64UUID() { return TIME_UUID_GENERATOR.getBase64UUID(); } + + /** + * Return a {@link String} that is the json representation of the provided + * {@link ToXContent}. + */ + public static String toString(ToXContent toXContent) { + try { + XContentBuilder builder = JsonXContent.contentBuilder(); + toXContent.toXContent(builder, ToXContent.EMPTY_PARAMS); + return builder.string(); + } catch (IOException e) { + throw new AssertionError("Cannot happen", e); + } + } } diff --git a/src/main/java/org/elasticsearch/common/Table.java b/src/main/java/org/elasticsearch/common/Table.java index 18ba34c2487..12a6dcc87da 100644 --- a/src/main/java/org/elasticsearch/common/Table.java +++ b/src/main/java/org/elasticsearch/common/Table.java @@ -21,7 +21,6 @@ package org.elasticsearch.common; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalStateException; import java.util.ArrayList; import java.util.HashMap; @@ -47,7 +46,7 @@ public class Table { public Table endHeaders() { if (currentCells == null || currentCells.isEmpty()) { - throw new ElasticsearchIllegalStateException("no headers added..."); + throw new IllegalStateException("no headers added..."); } inHeaders = false; headers = currentCells; @@ -73,7 +72,7 @@ public class Table { public Table startRow() { if (headers.isEmpty()) { - throw new ElasticsearchIllegalStateException("no headers added..."); + throw new IllegalStateException("no headers added..."); } currentCells = new ArrayList<>(headers.size()); return this; @@ -81,7 +80,7 @@ public class Table { public Table endRow(boolean check) { if (currentCells == null) { - throw new ElasticsearchIllegalStateException("no row started..."); + throw new IllegalStateException("no row started..."); } if (check && (currentCells.size() != headers.size())) { StringBuilder s = new StringBuilder(); @@ -89,7 +88,7 @@ public class Table { 
s.append(currentCells.size()); s.append(" in a row compared to header "); s.append(headers.size()); - throw new ElasticsearchIllegalStateException(s.toString()); + throw new IllegalStateException(s.toString()); } rows.add(currentCells); currentCells = null; @@ -107,11 +106,11 @@ public class Table { public Table addCell(Object value, String attributes) { if (currentCells == null) { - throw new ElasticsearchIllegalStateException("no block started..."); + throw new IllegalStateException("no block started..."); } if (!inHeaders) { if (currentCells.size() == headers.size()) { - throw new ElasticsearchIllegalStateException("can't add more cells to a row than the header"); + throw new IllegalStateException("can't add more cells to a row than the header"); } } Map mAttr; diff --git a/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java b/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java index b354a3c063e..b45d17e20d5 100644 --- a/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java +++ b/src/main/java/org/elasticsearch/common/TimeBasedUUIDGenerator.java @@ -19,12 +19,9 @@ package org.elasticsearch.common; -import org.elasticsearch.ElasticsearchIllegalStateException; import java.io.IOException; -import java.util.Random; import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; /** These are essentially flake ids (http://boundary.com/blog/2012/01/12/flake-a-decentralized-k-ordered-unique-id-generator-in-erlang) but * we use 6 (not 8) bytes for timestamp, and use 3 (not 2) bytes for sequence number. */ @@ -87,7 +84,7 @@ class TimeBasedUUIDGenerator implements UUIDGenerator { try { encoded = Base64.encodeBytesToBytes(uuidBytes, 0, uuidBytes.length, Base64.URL_SAFE); } catch (IOException e) { - throw new ElasticsearchIllegalStateException("should not be thrown", e); + throw new IllegalStateException("should not be thrown", e); } // We are a multiple of 3 bytes so we should not see any padding: diff --git a/src/main/java/org/elasticsearch/common/breaker/CircuitBreaker.java b/src/main/java/org/elasticsearch/common/breaker/CircuitBreaker.java index 6c099cfe014..afd8efdb8b4 100644 --- a/src/main/java/org/elasticsearch/common/breaker/CircuitBreaker.java +++ b/src/main/java/org/elasticsearch/common/breaker/CircuitBreaker.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.breaker; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import java.util.Locale; @@ -50,7 +49,7 @@ public interface CircuitBreaker { case "memory": return Type.MEMORY; default: - throw new ElasticsearchIllegalArgumentException("No CircuitBreaker with type: " + value); + throw new IllegalArgumentException("No CircuitBreaker with type: " + value); } } } diff --git a/src/main/java/org/elasticsearch/common/bytes/BytesArray.java b/src/main/java/org/elasticsearch/common/bytes/BytesArray.java index c748ade21f9..9fc4098cad3 100644 --- a/src/main/java/org/elasticsearch/common/bytes/BytesArray.java +++ b/src/main/java/org/elasticsearch/common/bytes/BytesArray.java @@ -21,7 +21,6 @@ package org.elasticsearch.common.bytes; import com.google.common.base.Charsets; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.io.Channels; import org.elasticsearch.common.io.stream.BytesStreamInput; import org.elasticsearch.common.io.stream.StreamInput; @@ -90,7 +89,7 @@ public class BytesArray implements BytesReference { @Override public BytesReference slice(int from, int 
length) { if (from < 0 || (from + length) > this.length) { - throw new ElasticsearchIllegalArgumentException("can't slice a buffer with length [" + this.length + "], with slice parameters from [" + from + "], length [" + length + "]"); + throw new IllegalArgumentException("can't slice a buffer with length [" + this.length + "], with slice parameters from [" + from + "], length [" + length + "]"); } return new BytesArray(bytes, offset + from, length); } diff --git a/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java b/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java index 6c74029f9f5..1bc370cd894 100644 --- a/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java +++ b/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java @@ -21,7 +21,6 @@ package org.elasticsearch.common.bytes; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CharsRefBuilder; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.io.Channels; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.netty.NettyUtils; @@ -74,7 +73,7 @@ public class PagedBytesReference implements BytesReference { @Override public BytesReference slice(int from, int length) { if (from < 0 || (from + length) > length()) { - throw new ElasticsearchIllegalArgumentException("can't slice a buffer with length [" + length() + "], with slice parameters from [" + from + "], length [" + length + "]"); + throw new IllegalArgumentException("can't slice a buffer with length [" + length() + "], with slice parameters from [" + from + "], length [" + length + "]"); } return new PagedBytesReference(bigarrays, bytearray, offset + from, length); diff --git a/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java b/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java index eb6bb606933..8d4ac9068e7 100644 --- a/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java +++ b/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java @@ -36,7 +36,7 @@ public class ReleasablePagedBytesReference extends PagedBytesReference implement } @Override - public void close() throws ElasticsearchException { + public void close() { Releasables.close(bytearray); } } diff --git a/src/main/java/org/elasticsearch/common/cli/CliTool.java b/src/main/java/org/elasticsearch/common/cli/CliTool.java index eb0faf91d0a..f2e08c5b1e5 100644 --- a/src/main/java/org/elasticsearch/common/cli/CliTool.java +++ b/src/main/java/org/elasticsearch/common/cli/CliTool.java @@ -23,7 +23,6 @@ import com.google.common.base.Preconditions; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.GnuParser; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -146,7 +145,7 @@ public abstract class CliTool { } catch (IOException ioe) { terminal.printError(ioe); return ExitStatus.IO_ERROR.status; - } catch (IllegalArgumentException | ElasticsearchIllegalArgumentException ilae) { + } catch (IllegalArgumentException ilae) { terminal.printError(ilae); return ExitStatus.USAGE.status; } catch (Throwable t) { diff --git a/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java 
b/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java index cf5f4ac4f0a..6bd3a5bd45b 100644 --- a/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java +++ b/src/main/java/org/elasticsearch/common/collect/CopyOnWriteHashMap.java @@ -19,11 +19,11 @@ package org.elasticsearch.common.collect; +import com.google.common.base.Preconditions; import com.google.common.collect.Maps; import com.google.common.collect.UnmodifiableIterator; import org.apache.commons.lang3.ArrayUtils; import org.apache.lucene.util.mutable.MutableValueInt; -import org.elasticsearch.common.Preconditions; import java.util.*; diff --git a/src/main/java/org/elasticsearch/common/collect/HppcMaps.java b/src/main/java/org/elasticsearch/common/collect/HppcMaps.java index 6d3070cfbab..594b93df6a2 100644 --- a/src/main/java/org/elasticsearch/common/collect/HppcMaps.java +++ b/src/main/java/org/elasticsearch/common/collect/HppcMaps.java @@ -23,7 +23,6 @@ import com.carrotsearch.hppc.ObjectIntOpenHashMap; import com.carrotsearch.hppc.ObjectLookupContainer; import com.carrotsearch.hppc.ObjectObjectOpenHashMap; import com.carrotsearch.hppc.cursors.ObjectCursor; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import java.util.Iterator; @@ -72,7 +71,7 @@ public final class HppcMaps { @Override public V put(K key, V value) { if (key == null) { - throw new ElasticsearchIllegalArgumentException("Map key must not be null"); + throw new IllegalArgumentException("Map key must not be null"); } return super.put(key, value); } @@ -134,7 +133,7 @@ public final class HppcMaps { @Override public int put(V key, int value) { if (key == null) { - throw new ElasticsearchIllegalArgumentException("Map key must not be null"); + throw new IllegalArgumentException("Map key must not be null"); } return super.put(key, value); } diff --git a/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java b/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java index d7f140655ee..3bac87515a0 100644 --- a/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java +++ b/src/main/java/org/elasticsearch/common/component/AbstractLifecycleComponent.java @@ -59,7 +59,7 @@ public abstract class AbstractLifecycleComponent extends AbstractComponent im @SuppressWarnings({"unchecked"}) @Override - public T start() throws ElasticsearchException { + public T start() { if (!lifecycle.canMoveToStarted()) { return (T) this; } @@ -74,11 +74,11 @@ public abstract class AbstractLifecycleComponent extends AbstractComponent im return (T) this; } - protected abstract void doStart() throws ElasticsearchException; + protected abstract void doStart(); @SuppressWarnings({"unchecked"}) @Override - public T stop() throws ElasticsearchException { + public T stop() { if (!lifecycle.canMoveToStopped()) { return (T) this; } @@ -93,10 +93,10 @@ public abstract class AbstractLifecycleComponent extends AbstractComponent im return (T) this; } - protected abstract void doStop() throws ElasticsearchException; + protected abstract void doStop(); @Override - public void close() throws ElasticsearchException { + public void close() { if (lifecycle.started()) { stop(); } @@ -113,5 +113,5 @@ public abstract class AbstractLifecycleComponent extends AbstractComponent im } } - protected abstract void doClose() throws ElasticsearchException; + protected abstract void doClose(); } diff --git a/src/main/java/org/elasticsearch/common/component/Lifecycle.java 
b/src/main/java/org/elasticsearch/common/component/Lifecycle.java index 92bc7aa4782..e6cbf264af3 100644 --- a/src/main/java/org/elasticsearch/common/component/Lifecycle.java +++ b/src/main/java/org/elasticsearch/common/component/Lifecycle.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.component; -import org.elasticsearch.ElasticsearchIllegalStateException; /** * Lifecycle state. Allows the following transitions: @@ -105,7 +104,7 @@ public class Lifecycle { return state == State.STOPPED || state == State.CLOSED; } - public boolean canMoveToStarted() throws ElasticsearchIllegalStateException { + public boolean canMoveToStarted() throws IllegalStateException { State localState = this.state; if (localState == State.INITIALIZED || localState == State.STOPPED) { return true; @@ -114,13 +113,13 @@ public class Lifecycle { return false; } if (localState == State.CLOSED) { - throw new ElasticsearchIllegalStateException("Can't move to started state when closed"); + throw new IllegalStateException("Can't move to started state when closed"); } - throw new ElasticsearchIllegalStateException("Can't move to started with unknown state"); + throw new IllegalStateException("Can't move to started with unknown state"); } - public boolean moveToStarted() throws ElasticsearchIllegalStateException { + public boolean moveToStarted() throws IllegalStateException { State localState = this.state; if (localState == State.INITIALIZED || localState == State.STOPPED) { state = State.STARTED; @@ -130,12 +129,12 @@ public class Lifecycle { return false; } if (localState == State.CLOSED) { - throw new ElasticsearchIllegalStateException("Can't move to started state when closed"); + throw new IllegalStateException("Can't move to started state when closed"); } - throw new ElasticsearchIllegalStateException("Can't move to started with unknown state"); + throw new IllegalStateException("Can't move to started with unknown state"); } - public boolean canMoveToStopped() throws ElasticsearchIllegalStateException { + public boolean canMoveToStopped() throws IllegalStateException { State localState = state; if (localState == State.STARTED) { return true; @@ -144,12 +143,12 @@ public class Lifecycle { return false; } if (localState == State.CLOSED) { - throw new ElasticsearchIllegalStateException("Can't move to started state when closed"); + throw new IllegalStateException("Can't move to started state when closed"); } - throw new ElasticsearchIllegalStateException("Can't move to started with unknown state"); + throw new IllegalStateException("Can't move to started with unknown state"); } - public boolean moveToStopped() throws ElasticsearchIllegalStateException { + public boolean moveToStopped() throws IllegalStateException { State localState = state; if (localState == State.STARTED) { state = State.STOPPED; @@ -159,30 +158,30 @@ public class Lifecycle { return false; } if (localState == State.CLOSED) { - throw new ElasticsearchIllegalStateException("Can't move to started state when closed"); + throw new IllegalStateException("Can't move to started state when closed"); } - throw new ElasticsearchIllegalStateException("Can't move to started with unknown state"); + throw new IllegalStateException("Can't move to started with unknown state"); } - public boolean canMoveToClosed() throws ElasticsearchIllegalStateException { + public boolean canMoveToClosed() throws IllegalStateException { State localState = state; if (localState == State.CLOSED) { return false; } if (localState == State.STARTED) { - throw new 
ElasticsearchIllegalStateException("Can't move to closed before moving to stopped mode"); + throw new IllegalStateException("Can't move to closed before moving to stopped mode"); } return true; } - public boolean moveToClosed() throws ElasticsearchIllegalStateException { + public boolean moveToClosed() throws IllegalStateException { State localState = state; if (localState == State.CLOSED) { return false; } if (localState == State.STARTED) { - throw new ElasticsearchIllegalStateException("Can't move to closed before moving to stopped mode"); + throw new IllegalStateException("Can't move to closed before moving to stopped mode"); } state = State.CLOSED; return true; diff --git a/src/main/java/org/elasticsearch/common/component/LifecycleComponent.java b/src/main/java/org/elasticsearch/common/component/LifecycleComponent.java index d8e72bd56d8..452f644462f 100644 --- a/src/main/java/org/elasticsearch/common/component/LifecycleComponent.java +++ b/src/main/java/org/elasticsearch/common/component/LifecycleComponent.java @@ -35,7 +35,7 @@ public interface LifecycleComponent extends Releasable { void removeLifecycleListener(LifecycleListener listener); - T start() throws ElasticsearchException; + T start(); - T stop() throws ElasticsearchException; + T stop(); } diff --git a/src/main/java/org/elasticsearch/common/geo/GeoDistance.java b/src/main/java/org/elasticsearch/common/geo/GeoDistance.java index 96dac682de5..fca80970439 100644 --- a/src/main/java/org/elasticsearch/common/geo/GeoDistance.java +++ b/src/main/java/org/elasticsearch/common/geo/GeoDistance.java @@ -21,7 +21,6 @@ package org.elasticsearch.common.geo; import org.apache.lucene.util.Bits; import org.apache.lucene.util.SloppyMath; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.GeoPointValues; @@ -203,7 +202,7 @@ public enum GeoDistance { } else if ("factor".equals(name)) { return FACTOR; } - throw new ElasticsearchIllegalArgumentException("No geo distance for [" + name + "]"); + throw new IllegalArgumentException("No geo distance for [" + name + "]"); } public static interface FixedSourceDistance { diff --git a/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java b/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java index d679d2dd943..de37ddb4eb4 100644 --- a/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java +++ b/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java @@ -17,7 +17,6 @@ package org.elasticsearch.common.geo; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import java.util.ArrayList; import java.util.Collection; @@ -285,7 +284,7 @@ public class GeoHashUtils { case 'z': return 31; default: - throw new ElasticsearchIllegalArgumentException("the character '" + geo + "' is not a valid geohash character"); + throw new IllegalArgumentException("the character '" + geo + "' is not a valid geohash character"); } } @@ -354,7 +353,7 @@ public class GeoHashUtils { public static long encodeAsLong(double latitude, double longitude, int precision) { if((precision>12)||(precision<1)) { - throw new ElasticsearchIllegalArgumentException("Illegal precision length of "+precision+ + throw new IllegalArgumentException("Illegal precision length of "+precision+ ". 
Long-based geohashes only support precisions between 1 and 12"); } double latInterval0 = -90.0; @@ -479,4 +478,4 @@ public class GeoHashUtils { } return interval; } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java b/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java index 22b6ee074ff..07ceaf1b762 100644 --- a/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java +++ b/src/main/java/org/elasticsearch/common/geo/builders/ShapeBuilder.java @@ -27,7 +27,6 @@ import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.GeometryFactory; import org.apache.commons.lang3.tuple.Pair; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.ESLoggerFactory; @@ -256,7 +255,7 @@ public abstract class ShapeBuilder implements ToXContent { } return new CoordinateNode(new Coordinate(lon, lat)); } else if (token == XContentParser.Token.VALUE_NULL) { - throw new ElasticsearchIllegalArgumentException("coordinates cannot contain NULL values)"); + throw new IllegalArgumentException("coordinates cannot contain NULL values)"); } List nodes = new ArrayList<>(); @@ -703,7 +702,7 @@ public abstract class ShapeBuilder implements ToXContent { return type; } } - throw new ElasticsearchIllegalArgumentException("unknown geo_shape ["+geoshapename+"]"); + throw new IllegalArgumentException("unknown geo_shape ["+geoshapename+"]"); } public static ShapeBuilder parse(XContentParser parser) throws IOException { diff --git a/src/main/java/org/elasticsearch/common/io/Streams.java b/src/main/java/org/elasticsearch/common/io/Streams.java index 63f62015c87..b5f224e72f0 100644 --- a/src/main/java/org/elasticsearch/common/io/Streams.java +++ b/src/main/java/org/elasticsearch/common/io/Streams.java @@ -20,14 +20,12 @@ package org.elasticsearch.common.io; import com.google.common.base.Charsets; +import com.google.common.base.Preconditions; import com.google.common.collect.Lists; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.util.Callback; import java.io.*; -import java.nio.file.Files; -import java.nio.file.Path; import java.util.List; /** diff --git a/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java b/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java index 4235d38a21d..2107a9958da 100644 --- a/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java +++ b/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java @@ -60,11 +60,6 @@ public class BytesStreamOutput extends StreamOutput implements BytesStream { this.bytes = bigarrays.newByteArray(expectedSize); } - @Override - public boolean seekPositionSupported() { - return true; - } - @Override public long position() throws IOException { return count; diff --git a/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index ac0e9b3e2d8..dc9d9b60309 100644 --- a/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -32,6 +32,7 @@ import org.joda.time.DateTime; import java.io.IOException; import java.io.InputStream; +import 
java.io.ObjectInputStream; import java.util.*; /** @@ -411,6 +412,8 @@ public abstract class StreamInput extends InputStream { return readFloatArray(); case 20: return readDoubleArray(); + case 21: + return readBytesRef(); default: throw new IOException("Can't read unknown type [" + type + "]"); } @@ -472,4 +475,13 @@ public abstract class StreamInput extends InputStream { return null; } } + + public <T extends Throwable> T readThrowable() throws IOException { + try { + ObjectInputStream oin = new ObjectInputStream(this); + return (T) oin.readObject(); + } catch (ClassNotFoundException e) { + throw new IOException("failed to deserialize exception", e); + } + } } diff --git a/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index c4eb5bf4ce6..39b940d95b3 100644 --- a/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.text.Text; import org.joda.time.ReadableInstant; import java.io.IOException; +import java.io.ObjectOutputStream; import java.io.OutputStream; import java.util.Date; import java.util.LinkedHashMap; @@ -50,10 +51,6 @@ public abstract class StreamOutput extends OutputStream { return this; } - public boolean seekPositionSupported() { - return false; - } - public long position() throws IOException { throw new UnsupportedOperationException(); } @@ -398,6 +395,9 @@ public abstract class StreamOutput extends OutputStream { } else if (type == double[].class) { writeByte((byte) 20); writeDoubleArray((double[]) value); + } else if (value instanceof BytesRef) { + writeByte((byte) 21); + writeBytesRef((BytesRef) value); } else { throw new IOException("Can't write type [" + type + "]"); } @@ -442,4 +442,10 @@ public abstract class StreamOutput extends OutputStream { writeBoolean(false); } } + + public void writeThrowable(Throwable throwable) throws IOException { + ObjectOutputStream out = new ObjectOutputStream(this); + out.writeObject(throwable); + out.flush(); + } } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/CachedFilter.java b/src/main/java/org/elasticsearch/common/io/stream/StreamableReader.java similarity index 71% rename from src/main/java/org/elasticsearch/common/lucene/search/CachedFilter.java rename to src/main/java/org/elasticsearch/common/io/stream/StreamableReader.java index 027f794e6f1..28e2175f4ce 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/CachedFilter.java +++ b/src/main/java/org/elasticsearch/common/io/stream/StreamableReader.java @@ -16,17 +16,15 @@ * specific language governing permissions and limitations * under the License. */ +package org.elasticsearch.common.io.stream; -package org.elasticsearch.common.lucene.search; +import java.io.IOException; -import org.apache.lucene.search.Filter; - -/** - * A marker indicating that this is a cached filter. - */ -public abstract class CachedFilter extends Filter { - - public static boolean isCached(Filter filter) { - return filter instanceof CachedFilter; - } -} \ No newline at end of file +public interface StreamableReader<T> { + /** + * Reads a copy of an object with the same type from the stream input + * + * The caller object remains unchanged.
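The writeThrowable/readThrowable pair added above simply delegates to plain Java object serialization. A minimal round-trip sketch, assuming the BytesStreamOutput/BytesStreamInput classes referenced elsewhere in this patch (the demo class and variable names are illustrative, not part of the change):

```java
import org.elasticsearch.common.io.stream.BytesStreamInput;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

import java.io.IOException;

class ThrowableRoundTripDemo {
    // Writes the throwable with Java serialization, then reads back a copy.
    static Throwable roundTrip(Throwable t) throws IOException {
        BytesStreamOutput out = new BytesStreamOutput();
        out.writeThrowable(t);
        // BytesStreamInput over the written bytes is assumed here; it is the
        // same class that BytesArray.java imports earlier in this patch.
        StreamInput in = new BytesStreamInput(out.bytes());
        return in.readThrowable();
    }
}
```

The renamed StreamableReader and the Writeable interface introduced in the next hunk form the matching read/write contract; a hypothetical implementation could look like this (Point is invented for illustration, with the same imports as above plus StreamOutput):

```java
// "Point" is invented for illustration; only Writeable/StreamableReader are real.
class Point implements Writeable<Point> {
    private final int x, y;

    Point(int x, int y) { this.x = x; this.y = y; }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeInt(x);
        out.writeInt(y);
    }

    @Override
    public Point readFrom(StreamInput in) throws IOException {
        return new Point(in.readInt(), in.readInt()); // a new copy; 'this' is untouched
    }
}
```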
+ */ + T readFrom(StreamInput in) throws IOException; +} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryResponse.java b/src/main/java/org/elasticsearch/common/io/stream/Writeable.java similarity index 74% rename from src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryResponse.java rename to src/main/java/org/elasticsearch/common/io/stream/Writeable.java index 63640732d42..9025315dc43 100644 --- a/src/main/java/org/elasticsearch/action/deletebyquery/ShardDeleteByQueryResponse.java +++ b/src/main/java/org/elasticsearch/common/io/stream/Writeable.java @@ -17,13 +17,14 @@ * under the License. */ -package org.elasticsearch.action.deletebyquery; +package org.elasticsearch.common.io.stream; -import org.elasticsearch.action.ActionWriteResponse; +import java.io.IOException; -/** - * Delete by query response executed on a specific shard. - */ -public class ShardDeleteByQueryResponse extends ActionWriteResponse { +public interface Writeable<T> extends StreamableReader<T> { -} \ No newline at end of file + /** + * Writes the current object into the output stream out + */ + void writeTo(StreamOutput out) throws IOException; +} diff --git a/src/main/java/org/elasticsearch/common/lease/Releasable.java b/src/main/java/org/elasticsearch/common/lease/Releasable.java index 84b11fd0498..29ee1d59e6f 100644 --- a/src/main/java/org/elasticsearch/common/lease/Releasable.java +++ b/src/main/java/org/elasticsearch/common/lease/Releasable.java @@ -27,5 +27,5 @@ import org.elasticsearch.ElasticsearchException; public interface Releasable extends AutoCloseable { @Override - void close() throws ElasticsearchException; + void close(); } diff --git a/src/main/java/org/elasticsearch/common/lease/Releasables.java b/src/main/java/org/elasticsearch/common/lease/Releasables.java index 0322d66adcc..c91494a235d 100644 --- a/src/main/java/org/elasticsearch/common/lease/Releasables.java +++ b/src/main/java/org/elasticsearch/common/lease/Releasables.java @@ -104,7 +104,7 @@ public enum Releasables { return new Releasable() { @Override - public void close() throws ElasticsearchException { + public void close() { Releasables.close(releasables); } @@ -116,7 +116,7 @@ public enum Releasables { return new Releasable() { @Override - public void close() throws ElasticsearchException { + public void close() { Releasables.close(releasables); } diff --git a/src/main/java/org/elasticsearch/common/lucene/IndexCacheableQuery.java b/src/main/java/org/elasticsearch/common/lucene/IndexCacheableQuery.java new file mode 100644 index 00000000000..d31cd3835ec --- /dev/null +++ b/src/main/java/org/elasticsearch/common/lucene/IndexCacheableQuery.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.common.lucene; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Weight; + +import java.io.IOException; +import java.util.Objects; + +/** + * Base implementation for a query which is cacheable at the index level but + * not the segment level as usually expected. + */ +public abstract class IndexCacheableQuery extends Query { + + private Object readerCacheKey; + + @Override + public Query rewrite(IndexReader reader) throws IOException { + if (reader.getCoreCacheKey() != this.readerCacheKey) { + IndexCacheableQuery rewritten = (IndexCacheableQuery) clone(); + rewritten.readerCacheKey = reader.getCoreCacheKey(); + return rewritten; + } + return super.rewrite(reader); + } + + @Override + public boolean equals(Object obj) { + return super.equals(obj) + && readerCacheKey == ((IndexCacheableQuery) obj).readerCacheKey; + } + + @Override + public int hashCode() { + return 31 * super.hashCode() + Objects.hashCode(readerCacheKey); + } + + @Override + public final Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + if (readerCacheKey == null) { + throw new IllegalStateException("Rewrite first"); + } + if (readerCacheKey != searcher.getIndexReader().getCoreCacheKey()) { + throw new IllegalStateException("Must create weight on the same reader which has been used for rewriting"); + } + return doCreateWeight(searcher, needsScores); + } + + /** Create a {@link Weight} for this query. + * @see Query#createWeight(IndexSearcher, boolean) + */ + public abstract Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException; +} diff --git a/src/main/java/org/elasticsearch/common/lucene/Lucene.java b/src/main/java/org/elasticsearch/common/lucene/Lucene.java index 229e94a95d2..92593d13479 100644 --- a/src/main/java/org/elasticsearch/common/lucene/Lucene.java +++ b/src/main/java/org/elasticsearch/common/lucene/Lucene.java @@ -39,7 +39,6 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.search.Collector; -import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.Filter; @@ -62,8 +61,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Counter; import org.apache.lucene.util.Version; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -514,7 +511,7 @@ public class Lucene { public static void writeScoreDoc(StreamOutput out, ScoreDoc scoreDoc) throws IOException { if (!scoreDoc.getClass().equals(ScoreDoc.class)) { - throw new ElasticsearchIllegalArgumentException("This method can only be used to serialize a ScoreDoc, not a " + scoreDoc.getClass()); + throw new IllegalArgumentException("This method can only be used to serialize a ScoreDoc, not a " + scoreDoc.getClass()); } out.writeVInt(scoreDoc.doc); out.writeFloat(scoreDoc.score); @@ -530,48 +527,29 @@ public class Lucene { } public static Explanation readExplanation(StreamInput in) throws IOException { - Explanation explanation; - if (in.readBoolean()) { - 
Boolean match = in.readOptionalBoolean(); - explanation = new ComplexExplanation(); - ((ComplexExplanation) explanation).setMatch(match); - + boolean match = in.readBoolean(); + String description = in.readString(); + final Explanation[] subExplanations = new Explanation[in.readVInt()]; + for (int i = 0; i < subExplanations.length; ++i) { + subExplanations[i] = readExplanation(in); + } + if (match) { + return Explanation.match(in.readFloat(), description, subExplanations); } else { - explanation = new Explanation(); + return Explanation.noMatch(description, subExplanations); } - explanation.setValue(in.readFloat()); - explanation.setDescription(in.readString()); - if (in.readBoolean()) { - int size = in.readVInt(); - for (int i = 0; i < size; i++) { - explanation.addDetail(readExplanation(in)); - } - } - return explanation; } public static void writeExplanation(StreamOutput out, Explanation explanation) throws IOException { - - if (explanation instanceof ComplexExplanation) { - out.writeBoolean(true); - out.writeOptionalBoolean(((ComplexExplanation) explanation).getMatch()); - } else { - out.writeBoolean(false); - } - out.writeFloat(explanation.getValue()); - if (explanation.getDescription() == null) { - throw new ElasticsearchIllegalArgumentException("Explanation descriptions should NOT be null\n[" + explanation.toString() + "]"); - } + out.writeBoolean(explanation.isMatch()); out.writeString(explanation.getDescription()); Explanation[] subExplanations = explanation.getDetails(); - if (subExplanations == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeVInt(subExplanations.length); - for (Explanation subExp : subExplanations) { - writeExplanation(out, subExp); - } + out.writeVInt(subExplanations.length); + for (Explanation subExp : subExplanations) { + writeExplanation(out, subExp); + } + if (explanation.isMatch()) { + out.writeFloat(explanation.getValue()); } } @@ -652,6 +630,35 @@ public class Lucene { return DirectoryReader.indexExists(directory); } + /** + * Wait for an index to exist for up to {@code timeLimitMillis}. Returns + * true if the index eventually exists, false if not. 
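For context on the Explanation changes above: Lucene's ComplexExplanation and the mutable setValue/setDescription/addDetail API are gone, so explanations are now assembled with the immutable Explanation.match/noMatch factories, which is exactly what the rewritten readExplanation/writeExplanation rely on. A small sketch (class name and values are illustrative):

```java
import org.apache.lucene.search.Explanation;

class ExplanationFactoryDemo {
    // Assemble the same shape the rewritten readExplanation() reconstructs:
    // a matching parent with nested details, all built immutably.
    static Explanation sample(float funcScore, float maxBoost) {
        Explanation min = Explanation.match(Math.min(funcScore, maxBoost), "min of:",
                Explanation.match(funcScore, "function score"),
                Explanation.match(maxBoost, "maxBoost"));
        return Explanation.match(min.getValue(), "function score, product of:", min);
    }
}
```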
+ * + * Will retry the directory every second for at least {@code timeLimitMillis} + */ + public static final boolean waitForIndex(final Directory directory, final long timeLimitMillis) + throws IOException { + final long DELAY = 1000; + long waited = 0; + try { + while (true) { + if (waited >= timeLimitMillis) { + break; + } + if (indexExists(directory)) { + return true; + } + Thread.sleep(DELAY); + waited += DELAY; + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return false; + } + // one more try after all retries + return indexExists(directory); + } + /** * Returns true iff the given exception or * one of it's causes is an instance of {@link CorruptIndexException}, @@ -690,27 +697,27 @@ public class Lucene { return new Scorer(null) { @Override public float score() throws IOException { - throw new ElasticsearchIllegalStateException(message); + throw new IllegalStateException(message); } @Override public int freq() throws IOException { - throw new ElasticsearchIllegalStateException(message); + throw new IllegalStateException(message); } @Override public int advance(int arg0) throws IOException { - throw new ElasticsearchIllegalStateException(message); + throw new IllegalStateException(message); } @Override public long cost() { - throw new ElasticsearchIllegalStateException(message); + throw new IllegalStateException(message); } @Override public int docID() { - throw new ElasticsearchIllegalStateException(message); + throw new IllegalStateException(message); } @Override public int nextDoc() throws IOException { - throw new ElasticsearchIllegalStateException(message); + throw new IllegalStateException(message); } }; } diff --git a/src/main/java/org/elasticsearch/common/lucene/ShardCoreKeyMap.java b/src/main/java/org/elasticsearch/common/lucene/ShardCoreKeyMap.java new file mode 100644 index 00000000000..0d9270edaff --- /dev/null +++ b/src/main/java/org/elasticsearch/common/lucene/ShardCoreKeyMap.java @@ -0,0 +1,109 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.common.lucene; + +import com.google.common.collect.HashMultimap; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Multimap; + +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReader.CoreClosedListener; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardUtils; + +import java.io.IOException; +import java.util.IdentityHashMap; +import java.util.Map; +import java.util.Set; + +/** + * A map between segment core cache keys and the shard that these segments + * belong to. This allows to get the shard that a segment belongs to or to get + * the entire set of live core cache keys for a given index. 
In order to work + * this class needs to be notified about new segments. It modifies the current + * mappings as segments that were not known before are added and prevents the + * structure from growing indefinitely by registering close listeners on these + * segments so that at any time it only tracks live segments. + * + * NOTE: This is heavy. Avoid using this class unless absolutely required. + */ +public final class ShardCoreKeyMap { + + private final Map<Object, ShardId> coreKeyToShard; + private final Multimap<String, Object> indexToCoreKey; + + public ShardCoreKeyMap() { + coreKeyToShard = new IdentityHashMap<>(); + indexToCoreKey = HashMultimap.create(); + } + + /** + * Register a {@link LeafReader}. This is necessary so that the core cache + * key of this reader can be found later using {@link #getCoreKeysForIndex(String)}. + */ + public void add(LeafReader reader) { + final ShardId shardId = ShardUtils.extractShardId(reader); + if (shardId == null) { + throw new IllegalArgumentException("Could not extract shard id from " + reader); + } + final Object coreKey = reader.getCoreCacheKey(); + final String index = shardId.getIndex(); + synchronized (this) { + if (coreKeyToShard.put(coreKey, shardId) == null) { + final boolean added = indexToCoreKey.put(index, coreKey); + assert added; + reader.addCoreClosedListener(new CoreClosedListener() { + @Override + public void onClose(Object ownerCoreCacheKey) throws IOException { + assert coreKey == ownerCoreCacheKey; + synchronized (ShardCoreKeyMap.this) { + coreKeyToShard.remove(ownerCoreCacheKey); + indexToCoreKey.remove(index, coreKey); + } + } + }); + } + } + } + + /** + * Return the {@link ShardId} that holds the given segment, or {@code null} + * if this segment is not tracked. + */ + public synchronized ShardId getShardId(Object coreKey) { + return coreKeyToShard.get(coreKey); + } + + /** + * Get the set of core cache keys associated with the given index. + */ + public synchronized Set<Object> getCoreKeysForIndex(String index) { + return ImmutableSet.copyOf(indexToCoreKey.get(index)); + } + + /** + * Return the number of tracked segments.
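A short usage sketch for the new ShardCoreKeyMap, under the assumption that the reader belongs to a shard-backed index (otherwise add() throws); the demo class and names are illustrative only:

```java
import org.apache.lucene.index.LeafReader;
import org.elasticsearch.index.shard.ShardId;

import java.util.Set;

class ShardCoreKeyMapDemo {
    // 'reader' must be a segment reader of a shard-held index, or add() throws.
    static void track(ShardCoreKeyMap map, LeafReader reader) {
        map.add(reader); // also hooks a core-closed listener so stale keys are pruned
        ShardId shard = map.getShardId(reader.getCoreCacheKey());
        Set<Object> live = map.getCoreKeysForIndex(shard.getIndex());
        assert live.contains(reader.getCoreCacheKey());
    }
}
```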
+ */ + public synchronized int size() { + assert indexToCoreKey.size() == coreKeyToShard.size(); + return coreKeyToShard.size(); + } + +} diff --git a/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java b/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java index 2ff71b2f1de..fb6ebc28e1c 100644 --- a/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java +++ b/src/main/java/org/elasticsearch/common/lucene/all/AllEntries.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.lucene.all; import com.google.common.collect.Lists; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.io.FastCharArrayWriter; import org.elasticsearch.common.io.FastStringReader; @@ -111,7 +110,7 @@ public class AllEntries extends Reader { entry.reader().reset(); } } catch (IOException e) { - throw new ElasticsearchIllegalStateException("should not happen"); + throw new IllegalStateException("should not happen"); } it = entries.iterator(); if (it.hasNext()) { diff --git a/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java b/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java index 82b57e4e452..71cc5d7f9c2 100644 --- a/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java +++ b/src/main/java/org/elasticsearch/common/lucene/docset/DocIdSets.java @@ -22,14 +22,14 @@ package org.elasticsearch.common.lucene.docset; import org.apache.lucene.index.LeafReader; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.Bits; import org.apache.lucene.util.RamUsageEstimator; import org.apache.lucene.util.RoaringDocIdSet; import org.apache.lucene.util.SparseFixedBitSet; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.Nullable; import java.io.IOException; @@ -106,32 +106,41 @@ public class DocIdSets { } /** - * Given a {@link DocIdSet}, return a {@link Bits} instance that will match + * Given a {@link Scorer}, return a {@link Bits} instance that will match * all documents contained in the set. Note that the returned {@link Bits} - * instance should only be consumed once and in order. + * instance MUST be consumed in order. 
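The reworked asSequentialAccessBits consumes a Scorer (or its two-phase approximation) as Bits that must be probed with non-decreasing doc ids. A hedged sketch of the intended access pattern, mirroring the null-acceptDocs scorer() call that FilteredCollector makes below (the helper is illustrative, not part of the patch):

```java
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;
import org.apache.lucene.util.Bits;
import org.elasticsearch.common.lucene.docset.DocIdSets;

import java.io.IOException;

class SequentialBitsDemo {
    // Counts filter matches in one segment; doc ids are probed in ascending
    // order only, as the Bits contract above requires.
    static int countMatches(Weight filterWeight, LeafReaderContext context) throws IOException {
        Scorer scorer = filterWeight.scorer(context, null); // null acceptDocs, as in FilteredCollector
        Bits bits = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), scorer);
        int count = 0;
        for (int doc = 0; doc < context.reader().maxDoc(); doc++) {
            if (bits.get(doc)) {
                count++;
            }
        }
        return count;
    }
}
```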
*/ - public static Bits asSequentialAccessBits(final int maxDoc, @Nullable DocIdSet set) throws IOException { - if (set == null) { + public static Bits asSequentialAccessBits(final int maxDoc, @Nullable Scorer scorer) throws IOException { + if (scorer == null) { return new Bits.MatchNoBits(maxDoc); } - Bits bits = set.bits(); - if (bits != null) { - return bits; - } - final DocIdSetIterator iterator = set.iterator(); - if (iterator == null) { - return new Bits.MatchNoBits(maxDoc); + final TwoPhaseIterator twoPhase = scorer.asTwoPhaseIterator(); + final DocIdSetIterator iterator; + if (twoPhase == null) { + iterator = scorer; + } else { + iterator = twoPhase.approximation(); } + return new Bits() { - int previous = 0; + int previous = -1; + boolean previousMatched = false; @Override public boolean get(int index) { + if (index < 0 || index >= maxDoc) { + throw new IndexOutOfBoundsException(index + " is out of bounds: [" + 0 + "-" + maxDoc + "["); + } if (index < previous) { - throw new ElasticsearchIllegalArgumentException("This Bits instance can only be consumed in order. " + throw new IllegalArgumentException("This Bits instance can only be consumed in order. " + "Got called on [" + index + "] while previously called on [" + previous + "]"); } + if (index == previous) { + // we cache whether it matched because it is illegal to call + // twoPhase.matches() twice + return previousMatched; + } previous = index; int doc = iterator.docID(); @@ -139,10 +148,17 @@ public class DocIdSets { try { doc = iterator.advance(index); } catch (IOException e) { - throw new ElasticsearchIllegalStateException("Cannot advance iterator", e); + throw new IllegalStateException("Cannot advance iterator", e); } } - return index == doc; + if (index == doc) { + try { + return previousMatched = twoPhase == null || twoPhase.matches(); + } catch (IOException e) { + throw new IllegalStateException("Cannot validate match", e); + } + } + return previousMatched = false; } @Override diff --git a/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java b/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java index 9531f8ea77f..15a421567e5 100644 --- a/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java +++ b/src/main/java/org/elasticsearch/common/lucene/index/FilterableTermsEnum.java @@ -31,7 +31,6 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; import org.apache.lucene.util.Bits; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lucene.docset.DocIdSets; @@ -69,7 +68,7 @@ public class FilterableTermsEnum extends TermsEnum { public FilterableTermsEnum(IndexReader reader, String field, int docsEnumFlag, @Nullable final Filter filter) throws IOException { if ((docsEnumFlag != PostingsEnum.FREQS) && (docsEnumFlag != PostingsEnum.NONE)) { - throw new ElasticsearchIllegalArgumentException("invalid docsEnumFlag of " + docsEnumFlag); + throw new IllegalArgumentException("invalid docsEnumFlag of " + docsEnumFlag); } this.docsEnumFlag = docsEnumFlag; if (filter == null) { @@ -202,4 +201,4 @@ public class FilterableTermsEnum extends TermsEnum { public BytesRef next() throws IOException { throw new UnsupportedOperationException(UNSUPPORTED_MESSAGE); } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/common/lucene/index/FreqTermsEnum.java 
b/src/main/java/org/elasticsearch/common/lucene/index/FreqTermsEnum.java index 8b33b3ef4ad..a74b1b74645 100644 --- a/src/main/java/org/elasticsearch/common/lucene/index/FreqTermsEnum.java +++ b/src/main/java/org/elasticsearch/common/lucene/index/FreqTermsEnum.java @@ -103,7 +103,7 @@ public class FreqTermsEnum extends FilterableTermsEnum implements Releasable { @Override - public void close() throws ElasticsearchException { + public void close() { try { Releasables.close(cachedTermOrds, termDocFreqs, termsTotalFreqs); } finally { diff --git a/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java b/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java index 7501307264b..770ddac0ce3 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/FilteredCollector.java @@ -31,18 +31,18 @@ import java.io.IOException; public class FilteredCollector implements Collector { private final Collector collector; - private final Filter filter; + private final Weight filter; - public FilteredCollector(Collector collector, Filter filter) { + public FilteredCollector(Collector collector, Weight filter) { this.collector = collector; this.filter = filter; } @Override public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { - final DocIdSet set = filter.getDocIdSet(context, null); + final Scorer filterScorer = filter.scorer(context, null); final LeafCollector in = collector.getLeafCollector(context); - final Bits bits = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), set); + final Bits bits = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), filterScorer); return new FilterLeafCollector(in) { @Override diff --git a/src/main/java/org/elasticsearch/common/lucene/search/NoCacheFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/NoCacheFilter.java deleted file mode 100644 index 73b3ba0590c..00000000000 --- a/src/main/java/org/elasticsearch/common/lucene/search/NoCacheFilter.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.common.lucene.search; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.DocIdSet; -import org.apache.lucene.search.Filter; -import org.apache.lucene.util.Bits; - -import java.io.IOException; - -/** - * A marker interface for {@link org.apache.lucene.search.Filter} denoting the filter - * as one that should not be cached, ever. 
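Since FilteredCollector now takes a pre-built Weight rather than a Filter, callers construct the filter weight once, up front. A sketch of the new call-site shape, assuming IndexSearcher.createNormalizedWeight(Query, boolean) from the Lucene version this patch targets (query and collector names are hypothetical):

```java
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.apache.lucene.search.Weight;
import org.elasticsearch.common.lucene.search.FilteredCollector;

import java.io.IOException;

class FilteredCollectorDemo {
    // Build the filter Weight once, up front; FilteredCollector then applies
    // it per segment through asSequentialAccessBits.
    static Collector filtered(IndexSearcher searcher, Query filterQuery, Collector inner)
            throws IOException {
        Weight filterWeight = searcher.createNormalizedWeight(
                new QueryWrapperFilter(filterQuery), false); // false: the filter needs no scores
        return new FilteredCollector(inner, filterWeight);
    }
}
```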
- */ -public abstract class NoCacheFilter extends Filter { - - private static final class NoCacheFilterWrapper extends NoCacheFilter { - private final Filter delegate; - private NoCacheFilterWrapper(Filter delegate) { - this.delegate = delegate; - } - - @Override - public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException { - return delegate.getDocIdSet(context, acceptDocs); - } - - @Override - public int hashCode() { - return delegate.hashCode(); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - if (obj instanceof NoCacheFilterWrapper) { - return delegate.equals(((NoCacheFilterWrapper)obj).delegate); - } - return false; - } - - @Override - public String toString(String field) { - - return "no_cache(" + delegate + ")"; - } - - } - - /** - * Wraps a filter in a NoCacheFilter or returns it if it already is a NoCacheFilter. - */ - public static Filter wrap(Filter filter) { - if (filter instanceof NoCacheFilter) { - return filter; - } - return new NoCacheFilterWrapper(filter); - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/common/lucene/search/Queries.java b/src/main/java/org/elasticsearch/common/lucene/search/Queries.java index b64758ee592..fe33206b0cc 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/Queries.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/Queries.java @@ -19,19 +19,19 @@ package org.elasticsearch.common.lucene.search; +import org.apache.lucene.index.Term; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.MatchAllDocsQuery; -import org.apache.lucene.search.MatchNoDocsQuery; +import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryWrapperFilter; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.SuppressForbidden; -import org.elasticsearch.index.query.QueryParseContext; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; +import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import java.util.List; import java.util.regex.Pattern; @@ -51,11 +51,19 @@ public class Queries { } public static Filter newMatchAllFilter() { - return wrap(newMatchAllQuery()); + return new QueryWrapperFilter(newMatchAllQuery()); } public static Filter newMatchNoDocsFilter() { - return wrap(newMatchNoDocsQuery()); + return new QueryWrapperFilter(newMatchNoDocsQuery()); + } + + public static Filter newNestedFilter() { + return new QueryWrapperFilter(new PrefixQuery(new Term(TypeFieldMapper.NAME, new BytesRef("__")))); + } + + public static Filter newNonNestedFilter() { + return new QueryWrapperFilter(not(newNestedFilter())); } /** Return a query that matches all documents but those that match the given query. */ @@ -158,24 +166,4 @@ public class Queries { optionalClauseCount : (result < 0 ? 0 : result)); } - - /** - * Wraps a query in a filter. - * - * If a filter has an anti per segment execution / caching nature then @{@link CustomQueryWrappingFilter} is returned - * otherwise the standard {@link org.apache.lucene.search.QueryWrapperFilter} is returned. 
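With the wrap() helpers and CustomQueryWrappingFilter removed, call sites construct QueryWrapperFilter directly, and the new nested-document helpers are plain prefix filters on the internal type field. Roughly (a sketch only; the demo class is invented):

```java
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryWrapperFilter;
import org.elasticsearch.common.lucene.search.Queries;

class QueriesDemo {
    // Replaces the removed Queries.wrap(query): wrap directly.
    static Filter asFilter(Query query) {
        return new QueryWrapperFilter(query);
    }

    // The new helpers: nested docs carry a "__"-prefixed type, top-level docs do not.
    static Filter nestedDocs()   { return Queries.newNestedFilter(); }
    static Filter topLevelDocs() { return Queries.newNonNestedFilter(); }
}
```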
- */ - @SuppressForbidden(reason = "QueryWrapperFilter cachability") - public static Filter wrap(Query query, QueryParseContext context) { - if ((context != null && context.requireCustomQueryWrappingFilter()) || CustomQueryWrappingFilter.shouldUseCustomQueryWrappingFilter(query)) { - return new CustomQueryWrappingFilter(query); - } else { - return new QueryWrapperFilter(query); - } - } - - /** Wrap as a {@link Filter}. */ - public static Filter wrap(Query query) { - return wrap(query, null); - } } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/ResolvableFilter.java b/src/main/java/org/elasticsearch/common/lucene/search/ResolvableFilter.java index 4578e6711be..a4c92d78804 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/ResolvableFilter.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/ResolvableFilter.java @@ -19,9 +19,11 @@ package org.elasticsearch.common.lucene.search; +import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.Query; import org.apache.lucene.util.Bits; import java.io.IOException; @@ -37,8 +39,6 @@ public abstract class ResolvableFilter extends Filter { */ public abstract Filter resolve(); - - @Override public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException { Filter resolvedFilter = resolve(); @@ -48,4 +48,13 @@ public abstract class ResolvableFilter extends Filter { return null; } } + + @Override + public Query rewrite(IndexReader reader) throws IOException { + final Filter resolved = resolve(); + if (resolved != null) { + return resolved; + } + return super.rewrite(reader); + } } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/BoostScoreFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/BoostScoreFunction.java index 01610a791b2..58d438adb3a 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/BoostScoreFunction.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/BoostScoreFunction.java @@ -52,9 +52,7 @@ public class BoostScoreFunction extends ScoreFunction { @Override public Explanation explainScore(int docId, Explanation subQueryScore) { - Explanation exp = new Explanation(boost, "static boost factor"); - exp.addDetail(new Explanation(boost, "boostFactor")); - return exp; + return Explanation.match(boost, "static boost factor", Explanation.match(boost, "boostFactor")); } }; } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java index d5455fa1f38..30c8f01b709 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/CombineFunction.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.lucene.search.function; -import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; public enum CombineFunction { @@ -35,16 +34,15 @@ public enum CombineFunction { } @Override - public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { + public Explanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { float score = queryBoost * Math.min(funcExpl.getValue(), maxBoost) * queryExpl.getValue(); - 
ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:"); - res.addDetail(queryExpl); - ComplexExplanation minExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of"); - minExpl.addDetail(funcExpl); - minExpl.addDetail(new Explanation(maxBoost, "maxBoost")); - res.addDetail(minExpl); - res.addDetail(new Explanation(queryBoost, "queryBoost")); - return res; + Explanation boostExpl = Explanation.match(maxBoost, "maxBoost"); + Explanation minExpl = Explanation.match( + Math.min(funcExpl.getValue(), maxBoost), + "min of:", + funcExpl, boostExpl); + return Explanation.match(score, "function score, product of:", + queryExpl, minExpl, Explanation.match(queryBoost, "queryBoost")); } }, REPLACE { @@ -59,15 +57,15 @@ public enum CombineFunction { } @Override - public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { + public Explanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { float score = queryBoost * Math.min(funcExpl.getValue(), maxBoost); - ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:"); - ComplexExplanation minExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of"); - minExpl.addDetail(funcExpl); - minExpl.addDetail(new Explanation(maxBoost, "maxBoost")); - res.addDetail(minExpl); - res.addDetail(new Explanation(queryBoost, "queryBoost")); - return res; + Explanation boostExpl = Explanation.match(maxBoost, "maxBoost"); + Explanation minExpl = Explanation.match( + Math.min(funcExpl.getValue(), maxBoost), + "min of:", + funcExpl, boostExpl); + return Explanation.match(score, "function score, product of:", + minExpl, Explanation.match(queryBoost, "queryBoost")); } }, @@ -83,19 +81,14 @@ public enum CombineFunction { } @Override - public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { + public Explanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { float score = queryBoost * (Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue()); - ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:"); - ComplexExplanation minExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of"); - minExpl.addDetail(funcExpl); - minExpl.addDetail(new Explanation(maxBoost, "maxBoost")); - ComplexExplanation sumExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue(), - "sum of"); - sumExpl.addDetail(queryExpl); - sumExpl.addDetail(minExpl); - res.addDetail(sumExpl); - res.addDetail(new Explanation(queryBoost, "queryBoost")); - return res; + Explanation minExpl = Explanation.match(Math.min(funcExpl.getValue(), maxBoost), "min of:", + funcExpl, Explanation.match(maxBoost, "maxBoost")); + Explanation sumExpl = Explanation.match(Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue(), "sum of", + queryExpl, minExpl); + return Explanation.match(score, "function score, product of:", + sumExpl, Explanation.match(queryBoost, "queryBoost")); } }, @@ -111,19 +104,15 @@ public enum CombineFunction { } @Override - public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { + public Explanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { float score = 
toFloat(queryBoost * (queryExpl.getValue() + Math.min(funcExpl.getValue(), maxBoost)) / 2.0); - ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:"); - ComplexExplanation minExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of"); - minExpl.addDetail(funcExpl); - minExpl.addDetail(new Explanation(maxBoost, "maxBoost")); - ComplexExplanation avgExpl = new ComplexExplanation(true, - toFloat((Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue()) / 2.0), "avg of"); - avgExpl.addDetail(queryExpl); - avgExpl.addDetail(minExpl); - res.addDetail(avgExpl); - res.addDetail(new Explanation(queryBoost, "queryBoost")); - return res; + Explanation minExpl = Explanation.match(Math.min(funcExpl.getValue(), maxBoost), "min of:", + funcExpl, Explanation.match(maxBoost, "maxBoost")); + Explanation avgExpl = Explanation.match( + toFloat((Math.min(funcExpl.getValue(), maxBoost) + queryExpl.getValue()) / 2.0), "avg of", + queryExpl, minExpl); + return Explanation.match(score, "function score, product of:", + avgExpl, Explanation.match(queryBoost, "queryBoost")); } }, @@ -139,19 +128,16 @@ public enum CombineFunction { } @Override - public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { + public Explanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { float score = toFloat(queryBoost * Math.min(queryExpl.getValue(), Math.min(funcExpl.getValue(), maxBoost))); - ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:"); - ComplexExplanation innerMinExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of"); - innerMinExpl.addDetail(funcExpl); - innerMinExpl.addDetail(new Explanation(maxBoost, "maxBoost")); - ComplexExplanation outerMinExpl = new ComplexExplanation(true, Math.min(Math.min(funcExpl.getValue(), maxBoost), - queryExpl.getValue()), "min of"); - outerMinExpl.addDetail(queryExpl); - outerMinExpl.addDetail(innerMinExpl); - res.addDetail(outerMinExpl); - res.addDetail(new Explanation(queryBoost, "queryBoost")); - return res; + Explanation innerMinExpl = Explanation.match( + Math.min(funcExpl.getValue(), maxBoost), "min of:", + funcExpl, Explanation.match(maxBoost, "maxBoost")); + Explanation outerMinExpl = Explanation.match( + Math.min(Math.min(funcExpl.getValue(), maxBoost), queryExpl.getValue()), "min of", + queryExpl, innerMinExpl); + return Explanation.match(score, "function score, product of:", + outerMinExpl, Explanation.match(queryBoost, "queryBoost")); } }, @@ -167,19 +153,16 @@ public enum CombineFunction { } @Override - public ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { + public Explanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost) { float score = toFloat(queryBoost * Math.max(queryExpl.getValue(), Math.min(funcExpl.getValue(), maxBoost))); - ComplexExplanation res = new ComplexExplanation(true, score, "function score, product of:"); - ComplexExplanation innerMinExpl = new ComplexExplanation(true, Math.min(funcExpl.getValue(), maxBoost), "Math.min of"); - innerMinExpl.addDetail(funcExpl); - innerMinExpl.addDetail(new Explanation(maxBoost, "maxBoost")); - ComplexExplanation outerMaxExpl = new ComplexExplanation(true, Math.max(Math.min(funcExpl.getValue(), maxBoost), - queryExpl.getValue()), "max of"); - outerMaxExpl.addDetail(queryExpl); - 
outerMaxExpl.addDetail(innerMinExpl); - res.addDetail(outerMaxExpl); - res.addDetail(new Explanation(queryBoost, "queryBoost")); - return res; + Explanation innerMinExpl = Explanation.match( + Math.min(funcExpl.getValue(), maxBoost), "min of:", + funcExpl, Explanation.match(maxBoost, "maxBoost")); + Explanation outerMaxExpl = Explanation.match( + Math.max(Math.min(funcExpl.getValue(), maxBoost), queryExpl.getValue()), "max of:", + queryExpl, innerMinExpl); + return Explanation.match(score, "function score, product of:", + outerMaxExpl, Explanation.match(queryBoost, "queryBoost")); } }; @@ -198,5 +181,5 @@ public enum CombineFunction { return Double.compare(floatVersion, input) == 0 || input == 0.0d ? 0 : 1.d - (floatVersion) / input; } - public abstract ComplexExplanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost); + public abstract Explanation explain(float queryBoost, Explanation queryExpl, Explanation funcExpl, float maxBoost); } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java index 5729d7c9b31..135cb53f65f 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/FieldValueFactorFunction.java @@ -36,14 +36,20 @@ public class FieldValueFactorFunction extends ScoreFunction { private final String field; private final float boostFactor; private final Modifier modifier; + /** + * Value used if the document is missing the field. + */ + private final Double missing; private final IndexNumericFieldData indexFieldData; - public FieldValueFactorFunction(String field, float boostFactor, Modifier modifierType, IndexNumericFieldData indexFieldData) { + public FieldValueFactorFunction(String field, float boostFactor, Modifier modifierType, Double missing, + IndexNumericFieldData indexFieldData) { super(CombineFunction.MULT); this.field = field; this.boostFactor = boostFactor; this.modifier = modifierType; this.indexFieldData = indexFieldData; + this.missing = missing; } @Override @@ -55,28 +61,32 @@ public class FieldValueFactorFunction extends ScoreFunction { public double score(int docId, float subQueryScore) { values.setDocument(docId); final int numValues = values.count(); + double value; if (numValues > 0) { - double val = values.valueAt(0) * boostFactor; - double result = modifier.apply(val); - if (Double.isNaN(result) || Double.isInfinite(result)) { - throw new ElasticsearchException("Result of field modification [" + modifier.toString() + - "(" + val + ")] must be a number"); - } - return result; + value = values.valueAt(0); + } else if (missing != null) { + value = missing; } else { throw new ElasticsearchException("Missing value for field [" + field + "]"); } + double val = value * boostFactor; + double result = modifier.apply(val); + if (Double.isNaN(result) || Double.isInfinite(result)) { + throw new ElasticsearchException("Result of field modification [" + modifier.toString() + "(" + val + + ")] must be a number"); + } + return result; } @Override public Explanation explainScore(int docId, Explanation subQueryScore) { - Explanation exp = new Explanation(); String modifierStr = modifier != null ? modifier.toString() : ""; + String defaultStr = missing != null ? 
"?:" + missing : ""; double score = score(docId, subQueryScore.getValue()); - exp.setValue(CombineFunction.toFloat(score)); - exp.setDescription("field value function: " + - modifierStr + "(" + "doc['" + field + "'].value * factor=" + boostFactor + ")"); - return exp; + return Explanation.match( + CombineFunction.toFloat(score), + String.format(Locale.ROOT, + "field value function: %s(doc['%s'].value%s * factor=%s)", modifierStr, field, defaultStr, boostFactor)); } }; } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java b/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java index b4ff4d7868c..d1835f57098 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/FiltersFunctionScoreQuery.java @@ -119,16 +119,22 @@ public class FiltersFunctionScoreQuery extends Query { // TODO: needsScores // if we dont need scores, just return the underlying Weight? Weight subQueryWeight = subQuery.createWeight(searcher, needsScores); - return new CustomBoostFactorWeight(this, subQueryWeight); + Weight[] filterWeights = new Weight[filterFunctions.length]; + for (int i = 0; i < filterFunctions.length; ++i) { + filterWeights[i] = searcher.createNormalizedWeight(filterFunctions[i].filter, false); + } + return new CustomBoostFactorWeight(this, subQueryWeight, filterWeights); } class CustomBoostFactorWeight extends Weight { final Weight subQueryWeight; + final Weight[] filterWeights; - public CustomBoostFactorWeight(Query parent, Weight subQueryWeight) throws IOException { + public CustomBoostFactorWeight(Query parent, Weight subQueryWeight, Weight[] filterWeights) throws IOException { super(parent); this.subQueryWeight = subQueryWeight; + this.filterWeights = filterWeights; } @Override @@ -162,7 +168,8 @@ public class FiltersFunctionScoreQuery extends Query { for (int i = 0; i < filterFunctions.length; i++) { FilterFunction filterFunction = filterFunctions[i]; functions[i] = filterFunction.function.getLeafScoreFunction(context); - docSets[i] = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), filterFunction.filter.getDocIdSet(context, acceptDocs)); + Scorer filterScorer = filterWeights[i].scorer(context, null); // no need to apply accepted docs + docSets[i] = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), filterScorer); } return new FiltersFunctionFactorScorer(this, subQueryScorer, scoreMode, filterFunctions, maxBoost, functions, docSets, combineFunction, minScore); } @@ -175,9 +182,10 @@ public class FiltersFunctionScoreQuery extends Query { return subQueryExpl; } // First: Gather explanations for all filters - List filterExplanations = new ArrayList<>(); + List filterExplanations = new ArrayList<>(); float weightSum = 0; - for (FilterFunction filterFunction : filterFunctions) { + for (int i = 0; i < filterFunctions.length; ++i) { + FilterFunction filterFunction = filterFunctions[i]; if (filterFunction.function instanceof WeightFactorFunction) { weightSum += ((WeightFactorFunction) filterFunction.function).getWeight(); @@ -186,23 +194,21 @@ public class FiltersFunctionScoreQuery extends Query { } Bits docSet = DocIdSets.asSequentialAccessBits(context.reader().maxDoc(), - filterFunction.filter.getDocIdSet(context, context.reader().getLiveDocs())); + filterWeights[i].scorer(context, null)); if (docSet.get(doc)) { Explanation functionExplanation = 
filterFunction.function.getLeafScoreFunction(context).explainScore(doc, subQueryExpl); double factor = functionExplanation.getValue(); float sc = CombineFunction.toFloat(factor); - ComplexExplanation filterExplanation = new ComplexExplanation(true, sc, "function score, product of:"); - filterExplanation.addDetail(new Explanation(1.0f, "match filter: " + filterFunction.filter.toString())); - filterExplanation.addDetail(functionExplanation); + Explanation filterExplanation = Explanation.match(sc, "function score, product of:", + Explanation.match(1.0f, "match filter: " + filterFunction.filter.toString()), functionExplanation); filterExplanations.add(filterExplanation); } } if (filterExplanations.size() == 0) { float sc = getBoost() * subQueryExpl.getValue(); - Explanation res = new ComplexExplanation(true, sc, "function score, no filter match, product of:"); - res.addDetail(subQueryExpl); - res.addDetail(new Explanation(getBoost(), "queryBoost")); - return res; + return Explanation.match(sc, "function score, no filter match, product of:", + subQueryExpl, + Explanation.match(getBoost(), "queryBoost")); } // Second: Compute the factor that would have been computed by the @@ -242,12 +248,11 @@ public class FiltersFunctionScoreQuery extends Query { } } } - ComplexExplanation factorExplanaition = new ComplexExplanation(true, CombineFunction.toFloat(factor), - "function score, score mode [" + scoreMode.toString().toLowerCase(Locale.ROOT) + "]"); - for (int i = 0; i < filterExplanations.size(); i++) { - factorExplanaition.addDetail(filterExplanations.get(i)); - } - return combineFunction.explain(getBoost(), subQueryExpl, factorExplanaition, maxBoost); + Explanation factorExplanation = Explanation.match( + CombineFunction.toFloat(factor), + "function score, score mode [" + scoreMode.toString().toLowerCase(Locale.ROOT) + "]", + filterExplanations); + return combineFunction.explain(getBoost(), subQueryExpl, factorExplanation, maxBoost); } } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java index 2e42cb92af0..934640e4ae0 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/RandomScoreFunction.java @@ -74,9 +74,9 @@ public class RandomScoreFunction extends ScoreFunction { @Override public Explanation explainScore(int docId, Explanation subQueryScore) { - Explanation exp = new Explanation(); - exp.setDescription("random score function (seed: " + originalSeed + ")"); - return exp; + return Explanation.match( + CombineFunction.toFloat(score(docId, subQueryScore.getValue())), + "random score function (seed: " + originalSeed + ")"); } }; } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java index 787d8c1a955..860588207f0 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/ScriptScoreFunction.java @@ -117,10 +117,12 @@ public class ScriptScoreFunction extends ScoreFunction { if (params != null) { explanation += "\" and parameters: \n" + params.toString(); } - exp = new Explanation(CombineFunction.toFloat(score), explanation); - Explanation scoreExp = new Explanation(subQueryScore.getValue(), "_score: "); - 
scoreExp.addDetail(subQueryScore); - exp.addDetail(scoreExp); + Explanation scoreExp = Explanation.match( + subQueryScore.getValue(), "_score: ", + subQueryScore); + return Explanation.match( + CombineFunction.toFloat(score), explanation, + scoreExp); } return exp; } diff --git a/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java b/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java index cda0fa0477e..db651ab8012 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java +++ b/src/main/java/org/elasticsearch/common/lucene/search/function/WeightFactorFunction.java @@ -20,9 +20,7 @@ package org.elasticsearch.common.lucene.search.function; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import java.io.IOException; @@ -38,7 +36,7 @@ public class WeightFactorFunction extends ScoreFunction { public WeightFactorFunction(float weight, ScoreFunction scoreFunction) { super(CombineFunction.MULT); if (scoreFunction instanceof BoostScoreFunction) { - throw new ElasticsearchIllegalArgumentException(BoostScoreFunction.BOOST_WEIGHT_ERROR_MESSAGE); + throw new IllegalArgumentException(BoostScoreFunction.BOOST_WEIGHT_ERROR_MESSAGE); } if (scoreFunction == null) { this.scoreFunction = SCORE_ONE; @@ -65,18 +63,16 @@ public class WeightFactorFunction extends ScoreFunction { @Override public Explanation explainScore(int docId, Explanation subQueryScore) throws IOException { - Explanation functionScoreExplanation; Explanation functionExplanation = leafFunction.explainScore(docId, subQueryScore); - functionScoreExplanation = new ComplexExplanation(true, functionExplanation.getValue() * (float) getWeight(), "product of:"); - functionScoreExplanation.addDetail(functionExplanation); - functionScoreExplanation.addDetail(explainWeight()); - return functionScoreExplanation; + return Explanation.match( + functionExplanation.getValue() * (float) getWeight(), "product of:", + functionExplanation, explainWeight()); } }; } public Explanation explainWeight() { - return new Explanation(getWeight(), "weight"); + return Explanation.match(getWeight(), "weight"); } public float getWeight() { @@ -99,7 +95,7 @@ public class WeightFactorFunction extends ScoreFunction { @Override public Explanation explainScore(int docId, Explanation subQueryScore) { - return new Explanation(1.0f, "constant score 1.0 - no function provided"); + return Explanation.match(1.0f, "constant score 1.0 - no function provided"); } }; } diff --git a/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java b/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java index 8c41c1b79c1..5b4515f7b96 100644 --- a/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java +++ b/src/main/java/org/elasticsearch/common/property/PropertyPlaceholder.java @@ -19,7 +19,7 @@ package org.elasticsearch.common.property; -import org.elasticsearch.common.Preconditions; +import com.google.common.base.Preconditions; import org.elasticsearch.common.Strings; import java.util.HashSet; @@ -159,7 +159,7 @@ public class PropertyPlaceholder { * * @see PropertyPlaceholder */ - public static interface PlaceholderResolver { + public interface PlaceholderResolver { /** * Resolves the supplied placeholder name into the replacement value. 
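A note on the pattern that recurs through the function-score classes above: Lucene 5 replaced the mutable ComplexExplanation/addDetail API with immutable Explanation objects built through the Explanation.match(value, description, details...) factory, which is why every explain method in this patch now assembles its explanation tree in a single call. A minimal standalone sketch of the new shape (the class name and values here are illustrative only, not part of this patch):

import org.apache.lucene.search.Explanation;

public class ExplanationSketch {
    public static void main(String[] args) {
        float funcValue = 2.0f;
        float maxBoost = 10.0f;
        // Leaf explanations carry a value plus a description.
        Explanation funcExpl = Explanation.match(funcValue, "function result");
        Explanation boostExpl = Explanation.match(maxBoost, "maxBoost");
        // One factory call replaces new ComplexExplanation(...) followed by
        // repeated addDetail(...) calls, mirroring the "min of:" sub-tree
        // built in CombineFunction above.
        Explanation minExpl = Explanation.match(
                Math.min(funcValue, maxBoost), "min of:",
                funcExpl, boostExpl);
        System.out.println(minExpl);
    }
}

Because the new objects are immutable, a value and its description can no longer drift apart the way setValue/setDescription allowed, which is what the RandomScoreFunction fix above takes advantage of: the old code built an Explanation with a description but never set its value.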
diff --git a/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java b/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java index 07298b8e3e2..026711e2313 100644 --- a/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java +++ b/src/main/java/org/elasticsearch/common/recycler/DequeRecycler.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.recycler; -import org.elasticsearch.ElasticsearchIllegalStateException; import java.util.Deque; @@ -89,7 +88,7 @@ public class DequeRecycler extends AbstractRecycler { @Override public void close() { if (value == null) { - throw new ElasticsearchIllegalStateException("recycler entry already released..."); + throw new IllegalStateException("recycler entry already released..."); } final boolean recycle = beforeRelease(); if (recycle) { diff --git a/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java b/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java index f347a739607..258e4355b9f 100644 --- a/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java +++ b/src/main/java/org/elasticsearch/common/recycler/NoneRecycler.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.recycler; -import org.elasticsearch.ElasticsearchIllegalStateException; /** */ @@ -60,7 +59,7 @@ public class NoneRecycler extends AbstractRecycler { @Override public void close() { if (value == null) { - throw new ElasticsearchIllegalStateException("recycler entry already released..."); + throw new IllegalStateException("recycler entry already released..."); } value = null; } diff --git a/src/main/java/org/elasticsearch/common/recycler/Recyclers.java b/src/main/java/org/elasticsearch/common/recycler/Recyclers.java index da526664513..4344a0689c1 100644 --- a/src/main/java/org/elasticsearch/common/recycler/Recyclers.java +++ b/src/main/java/org/elasticsearch/common/recycler/Recyclers.java @@ -22,9 +22,6 @@ package org.elasticsearch.common.recycler; import com.carrotsearch.hppc.hash.MurmurHash3; import com.google.common.collect.Queues; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; - -import java.lang.ref.SoftReference; public enum Recyclers { ; @@ -128,7 +125,7 @@ public enum Recyclers { return new Recycler.V() { @Override - public void close() throws ElasticsearchException { + public void close() { synchronized (lock) { delegate.close(); } @@ -155,7 +152,7 @@ public enum Recyclers { */ public static Recycler concurrent(final Recycler.Factory factory, final int concurrencyLevel) { if (concurrencyLevel < 1) { - throw new ElasticsearchIllegalArgumentException("concurrencyLevel must be >= 1"); + throw new IllegalArgumentException("concurrencyLevel must be >= 1"); } if (concurrencyLevel == 1) { return locked(factory.build()); diff --git a/src/main/java/org/elasticsearch/common/regex/Regex.java b/src/main/java/org/elasticsearch/common/regex/Regex.java index 67f4f13c420..f5c3094e31d 100644 --- a/src/main/java/org/elasticsearch/common/regex/Regex.java +++ b/src/main/java/org/elasticsearch/common/regex/Regex.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.regex; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Strings; import java.util.Locale; @@ -154,7 +153,7 @@ public class Regex { } else if ("UNICODE_CHAR_CLASS".equals(s)) { pFlags |= UNICODE_CHARACTER_CLASS; } else { - throw new ElasticsearchIllegalArgumentException("Unknown regex flag [" + s + "]"); + throw new IllegalArgumentException("Unknown 
regex flag [" + s + "]"); } } return pFlags; diff --git a/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java b/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java index 02a9725b37b..107324949db 100644 --- a/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java +++ b/src/main/java/org/elasticsearch/common/rounding/TimeZoneRounding.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.rounding; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.TimeValue; @@ -60,7 +59,7 @@ public abstract class TimeZoneRounding extends Rounding { public Builder(TimeValue interval) { this.unit = null; if (interval.millis() < 1) - throw new ElasticsearchIllegalArgumentException("Zero or negative time interval not supported"); + throw new IllegalArgumentException("Zero or negative time interval not supported"); this.interval = interval.millis(); } @@ -169,7 +168,7 @@ public abstract class TimeZoneRounding extends Rounding { TimeIntervalRounding(long interval, DateTimeZone timeZone) { if (interval < 1) - throw new ElasticsearchIllegalArgumentException("Zero or negative time interval not supported"); + throw new IllegalArgumentException("Zero or negative time interval not supported"); this.interval = interval; this.timeZone = timeZone; } diff --git a/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java b/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java index eca908ddae9..8738d30e774 100644 --- a/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java +++ b/src/main/java/org/elasticsearch/common/settings/ImmutableSettings.java @@ -25,7 +25,6 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Classes; @@ -506,7 +505,7 @@ public class ImmutableSettings implements Settings { @Override public Map getGroups(String settingPrefix, boolean ignoreNonGrouped) throws SettingsException { if (!Strings.hasLength(settingPrefix)) { - throw new ElasticsearchIllegalArgumentException("illegal setting prefix " + settingPrefix); + throw new IllegalArgumentException("illegal setting prefix " + settingPrefix); } if (settingPrefix.charAt(settingPrefix.length() - 1) != '.') { settingPrefix = settingPrefix + "."; @@ -695,7 +694,7 @@ public class ImmutableSettings implements Settings { } } if ((settings.length % 2) != 0) { - throw new ElasticsearchIllegalArgumentException("array settings of key + value order doesn't hold correct number of arguments (" + settings.length + ")"); + throw new IllegalArgumentException("array settings of key + value order doesn't hold correct number of arguments (" + settings.length + ")"); } for (int i = 0; i < settings.length; i++) { put(settings[i++].toString(), settings[i].toString()); @@ -919,7 +918,7 @@ public class ImmutableSettings implements Settings { for (String s : values) { int index = s.indexOf('='); if (index == -1) { - throw new ElasticsearchIllegalArgumentException("value [" + s + "] for settings loaded with delimiter [" + delimiter + "] is malformed, missing ="); + throw new IllegalArgumentException("value [" + s + "] for settings loaded 
with delimiter [" + delimiter + "] is malformed, missing ="); } map.put(s.substring(0, index), s.substring(index + 1)); } diff --git a/src/main/java/org/elasticsearch/common/transport/DummyTransportAddress.java b/src/main/java/org/elasticsearch/common/transport/DummyTransportAddress.java index 7d788ed298f..f86bb01de76 100644 --- a/src/main/java/org/elasticsearch/common/transport/DummyTransportAddress.java +++ b/src/main/java/org/elasticsearch/common/transport/DummyTransportAddress.java @@ -51,4 +51,9 @@ public class DummyTransportAddress implements TransportAddress { @Override public void writeTo(StreamOutput out) throws IOException { } + + @Override + public String toString() { + return "_dummy_addr_"; + } } diff --git a/src/main/java/org/elasticsearch/common/transport/InetSocketTransportAddress.java b/src/main/java/org/elasticsearch/common/transport/InetSocketTransportAddress.java index 1bc519435de..bfa4233d917 100644 --- a/src/main/java/org/elasticsearch/common/transport/InetSocketTransportAddress.java +++ b/src/main/java/org/elasticsearch/common/transport/InetSocketTransportAddress.java @@ -38,6 +38,10 @@ public class InetSocketTransportAddress implements TransportAddress { InetSocketTransportAddress.resolveAddress = resolveAddress; } + public static boolean getResolveAddress() { + return resolveAddress; + } + private InetSocketAddress address; InetSocketTransportAddress() { diff --git a/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java b/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java index 242000baad2..f3aa0941d46 100644 --- a/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java +++ b/src/main/java/org/elasticsearch/common/transport/TransportAddressSerializers.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.transport; import com.google.common.collect.ImmutableMap; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.ESLogger; @@ -57,7 +56,7 @@ public abstract class TransportAddressSerializers { public static synchronized void addAddressType(TransportAddress address) throws Exception { if (addressConstructors.containsKey(address.uniqueAddressTypeId())) { - throw new ElasticsearchIllegalStateException("Address [" + address.uniqueAddressTypeId() + "] already bound"); + throw new IllegalStateException("Address [" + address.uniqueAddressTypeId() + "] already bound"); } Constructor constructor = address.getClass().getDeclaredConstructor(); constructor.setAccessible(true); diff --git a/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java b/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java index 539b25de9b2..153417f869f 100644 --- a/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java +++ b/src/main/java/org/elasticsearch/common/unit/ByteSizeValue.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.unit; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -52,10 +51,10 @@ public class ByteSizeValue implements Serializable, Streamable { this.sizeUnit = sizeUnit; } - public int bytesAsInt() throws ElasticsearchIllegalArgumentException { + public int bytesAsInt() { long bytes = bytes(); if (bytes > Integer.MAX_VALUE) { - throw new 
ElasticsearchIllegalArgumentException("size [" + toString() + "] is bigger than max int"); + throw new IllegalArgumentException("size [" + toString() + "] is bigger than max int"); } return (int) bytes; } @@ -250,4 +249,4 @@ public class ByteSizeValue implements Serializable, Streamable { result = 31 * result + (sizeUnit != null ? sizeUnit.hashCode() : 0); return result; } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java b/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java index 25eb6680aca..cb89ca83d51 100644 --- a/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java +++ b/src/main/java/org/elasticsearch/common/unit/DistanceUnit.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.unit; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -179,7 +178,7 @@ public enum DistanceUnit { * * @param unit name of the unit * @return unit matching the given name - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if no unit matches the given name + * @throws IllegalArgumentException if no unit matches the given name */ public static DistanceUnit fromString(String unit) { for (DistanceUnit dunit : values()) { @@ -189,7 +188,7 @@ public enum DistanceUnit { } } } - throw new ElasticsearchIllegalArgumentException("No distance unit match [" + unit + "]"); + throw new IllegalArgumentException("No distance unit match [" + unit + "]"); } /** @@ -233,7 +232,7 @@ public enum DistanceUnit { byte b = in.readByte(); if(b<0 || b>=values().length) { - throw new ElasticsearchIllegalArgumentException("No type for distance unit matching [" + b + "]"); + throw new IllegalArgumentException("No type for distance unit matching [" + b + "]"); } else { return values()[b]; } diff --git a/src/main/java/org/elasticsearch/common/unit/Fuzziness.java b/src/main/java/org/elasticsearch/common/unit/Fuzziness.java index 712b37abcf9..a4ac1fda851 100644 --- a/src/main/java/org/elasticsearch/common/unit/Fuzziness.java +++ b/src/main/java/org/elasticsearch/common/unit/Fuzziness.java @@ -18,11 +18,10 @@ */ package org.elasticsearch.common.unit; +import com.google.common.base.Preconditions; import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.util.automaton.LevenshteinAutomata; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; @@ -111,7 +110,7 @@ public final class Fuzziness implements ToXContent { } default: - throw new ElasticsearchIllegalArgumentException("Can't parse fuzziness on token: [" + token + "]"); + throw new IllegalArgumentException("Can't parse fuzziness on token: [" + token + "]"); } } @@ -243,7 +242,7 @@ public final class Fuzziness implements ToXContent { return similarity; } } - throw new ElasticsearchIllegalArgumentException("Can't get similarity from fuzziness [" + fuzziness + "]"); + throw new IllegalArgumentException("Can't get similarity from fuzziness [" + fuzziness + "]"); } private int termLen(String text) { diff --git a/src/main/java/org/elasticsearch/common/unit/SizeValue.java b/src/main/java/org/elasticsearch/common/unit/SizeValue.java index 
458c855d833..415326f7069 100644 --- a/src/main/java/org/elasticsearch/common/unit/SizeValue.java +++ b/src/main/java/org/elasticsearch/common/unit/SizeValue.java @@ -19,8 +19,8 @@ package org.elasticsearch.common.unit; +import com.google.common.base.Preconditions; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; diff --git a/src/main/java/org/elasticsearch/common/util/BigArrays.java b/src/main/java/org/elasticsearch/common/util/BigArrays.java index 8467c3c5fcb..fa202f2d719 100644 --- a/src/main/java/org/elasticsearch/common/util/BigArrays.java +++ b/src/main/java/org/elasticsearch/common/util/BigArrays.java @@ -39,9 +39,9 @@ import org.elasticsearch.indices.breaker.CircuitBreakerService; import java.util.Arrays; /** Utility class to work with arrays. */ -public class BigArrays extends AbstractComponent { +public class BigArrays { - public static final BigArrays NON_RECYCLING_INSTANCE = new BigArrays(ImmutableSettings.EMPTY, null, null); + public static final BigArrays NON_RECYCLING_INSTANCE = new BigArrays(null, null); /** Page size in bytes: 16KB */ public static final int PAGE_SIZE_IN_BYTES = 1 << 14; @@ -364,18 +364,23 @@ public class BigArrays extends AbstractComponent { final PageCacheRecycler recycler; final CircuitBreakerService breakerService; final boolean checkBreaker; + private final BigArrays circuitBreakingInstance; @Inject - public BigArrays(Settings settings, PageCacheRecycler recycler, @Nullable final CircuitBreakerService breakerService) { + public BigArrays(PageCacheRecycler recycler, @Nullable final CircuitBreakerService breakerService) { // Checking the breaker is disabled if not specified - this(settings, recycler, breakerService, false); + this(recycler, breakerService, false); } - public BigArrays(Settings settings, PageCacheRecycler recycler, @Nullable final CircuitBreakerService breakerService, boolean checkBreaker) { - super(settings); + public BigArrays(PageCacheRecycler recycler, @Nullable final CircuitBreakerService breakerService, boolean checkBreaker) { this.checkBreaker = checkBreaker; this.recycler = recycler; this.breakerService = breakerService; + if (checkBreaker) { + this.circuitBreakingInstance = this; + } else { + this.circuitBreakingInstance = new BigArrays(recycler, breakerService, true); + } } /** @@ -411,11 +416,11 @@ public class BigArrays extends AbstractComponent { } /** - * Return a new instance of this BigArrays class with circuit breaking + * Return an instance of this BigArrays class with circuit breaking * explicitly enabled, instead of only accounting enabled */ public BigArrays withCircuitBreaking() { - return new BigArrays(this.settings, this.recycler, this.breakerService, true); + return this.circuitBreakingInstance; } private T resizeInPlace(T array, long newSize) { diff --git a/src/main/java/org/elasticsearch/common/util/BloomFilter.java b/src/main/java/org/elasticsearch/common/util/BloomFilter.java index 6df7477a9cb..8a2acba8d28 100644 --- a/src/main/java/org/elasticsearch/common/util/BloomFilter.java +++ b/src/main/java/org/elasticsearch/common/util/BloomFilter.java @@ -25,7 +25,6 @@ import org.apache.lucene.store.DataOutput; import org.apache.lucene.store.IndexInput; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import 
org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.hash.MurmurHash3; @@ -486,7 +485,7 @@ public class BloomFilter { } if (type == 1) { return Hashing.V1; } else { - throw new ElasticsearchIllegalArgumentException("no hashing type matching " + type); + throw new IllegalArgumentException("no hashing type matching " + type); } } } diff --git a/src/main/java/org/elasticsearch/common/util/CollectionUtils.java b/src/main/java/org/elasticsearch/common/util/CollectionUtils.java index 76189877ca0..de5171d50dc 100644 --- a/src/main/java/org/elasticsearch/common/util/CollectionUtils.java +++ b/src/main/java/org/elasticsearch/common/util/CollectionUtils.java @@ -23,8 +23,8 @@ import com.carrotsearch.hppc.DoubleArrayList; import com.carrotsearch.hppc.FloatArrayList; import com.carrotsearch.hppc.LongArrayList; import com.carrotsearch.hppc.ObjectArrayList; +import com.google.common.base.Preconditions; import org.apache.lucene.util.*; -import org.elasticsearch.common.Preconditions; import java.util.*; diff --git a/src/main/java/org/elasticsearch/common/util/LocaleUtils.java b/src/main/java/org/elasticsearch/common/util/LocaleUtils.java index 50f494f2d5a..2e6c01a1ca7 100644 --- a/src/main/java/org/elasticsearch/common/util/LocaleUtils.java +++ b/src/main/java/org/elasticsearch/common/util/LocaleUtils.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.util; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import java.util.Locale; @@ -47,7 +46,7 @@ public class LocaleUtils { // lang return new Locale(parts[0]); default: - throw new ElasticsearchIllegalArgumentException("Can't parse locale: [" + localeStr + "]"); + throw new IllegalArgumentException("Can't parse locale: [" + localeStr + "]"); } } diff --git a/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java b/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java index eff145ae80b..7949989c2a0 100644 --- a/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java +++ b/src/main/java/org/elasticsearch/common/util/LongObjectPagedHashMap.java @@ -166,7 +166,7 @@ public class LongObjectPagedHashMap extends AbstractPagedHashMap implements I } @Override - public void close() throws ElasticsearchException { + public void close() { Releasables.close(keys, values); } diff --git a/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java b/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java index 02d35ac0dcb..1cb700cff60 100644 --- a/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java +++ b/src/main/java/org/elasticsearch/common/util/MultiDataPathUpgrader.java @@ -27,7 +27,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.store.Lock; import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -127,7 +126,7 @@ public class MultiDataPathUpgrader { out.flush(); if (!status.clean) { logger.warn("check index [failure]\n{}", new String(os.bytes().toBytes(), Charsets.UTF_8)); - throw new ElasticsearchIllegalStateException("index check failure"); + throw new IllegalStateException("index check failure"); } } } diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java 
b/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java index bfd530c856e..b2a80fc68db 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/CountDown.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.util.concurrent; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicInteger; @@ -36,7 +35,7 @@ public final class CountDown { public CountDown(int count) { if (count < 0) { - throw new ElasticsearchIllegalArgumentException("count must be greater or equal to 0 but was: " + count); + throw new IllegalArgumentException("count must be greater or equal to 0 but was: " + count); } this.originalCount = count; this.countDown = new AtomicInteger(count); diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java b/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java index cf6239445d8..8bb16869c47 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/EsAbortPolicy.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.util.concurrent; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.metrics.CounterMetric; import java.util.concurrent.BlockingQueue; @@ -38,13 +37,13 @@ public class EsAbortPolicy implements XRejectedExecutionHandler { if (((AbstractRunnable) r).isForceExecution()) { BlockingQueue queue = executor.getQueue(); if (!(queue instanceof SizeBlockingQueue)) { - throw new ElasticsearchIllegalStateException("forced execution, but expected a size queue"); + throw new IllegalStateException("forced execution, but expected a size queue"); } try { ((SizeBlockingQueue) queue).forcePut(r); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - throw new ElasticsearchIllegalStateException("forced execution, but got interrupted", e); + throw new IllegalStateException("forced execution, but got interrupted", e); } return; } diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java b/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java index e727d35304d..8e21065e2f4 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/EsThreadPoolExecutor.java @@ -19,8 +19,6 @@ package org.elasticsearch.common.util.concurrent; -import org.elasticsearch.ElasticsearchIllegalStateException; -import org.elasticsearch.action.ActionRunnable; import java.util.concurrent.*; @@ -44,7 +42,7 @@ public class EsThreadPoolExecutor extends ThreadPoolExecutor { public void shutdown(ShutdownListener listener) { synchronized (monitor) { if (this.listener != null) { - throw new ElasticsearchIllegalStateException("Shutdown was already called on this thread pool"); + throw new IllegalStateException("Shutdown was already called on this thread pool"); } if (isTerminated()) { listener.onTerminated(); diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java b/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java index 862bc6d9645..fb62a628244 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/KeyedLock.java @@ -19,7 +19,6 @@ package 
org.elasticsearch.common.util.concurrent; -import org.elasticsearch.ElasticsearchIllegalStateException; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicInteger; @@ -39,6 +38,19 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; * */ public class KeyedLock { + private final boolean fair; + + /** + * @param fair Use fair locking, i.e. threads get the lock in the order they requested it + */ + public KeyedLock(boolean fair) { + this.fair = fair; + } + + public KeyedLock() { + this(false); + } + private final ConcurrentMap map = ConcurrentCollections.newConcurrentMap(); protected final ThreadLocal threadLocal = new ThreadLocal<>(); @@ -47,12 +59,12 @@ public class KeyedLock { while (true) { if (threadLocal.get() != null) { // if we are here, the thread already has the lock - throw new ElasticsearchIllegalStateException("Lock already acquired in Thread" + Thread.currentThread().getId() + throw new IllegalStateException("Lock already acquired in Thread" + Thread.currentThread().getId() + " for key " + key); } KeyLock perNodeLock = map.get(key); if (perNodeLock == null) { - KeyLock newLock = new KeyLock(); + KeyLock newLock = new KeyLock(fair); perNodeLock = map.putIfAbsent(key, newLock); if (perNodeLock == null) { newLock.lock(); @@ -73,7 +85,7 @@ public class KeyedLock { public void release(T key) { KeyLock lock = threadLocal.get(); if (lock == null) { - throw new ElasticsearchIllegalStateException("Lock not acquired"); + throw new IllegalStateException("Lock not acquired"); } release(key, lock); } @@ -92,6 +104,10 @@ public class KeyedLock { @SuppressWarnings("serial") private final static class KeyLock extends ReentrantLock { + KeyLock(boolean fair) { + super(fair); + } + private final AtomicInteger count = new AtomicInteger(1); } @@ -105,7 +121,17 @@ public class KeyedLock { */ public final static class GlobalLockable extends KeyedLock { - private final ReadWriteLock lock = new ReentrantReadWriteLock(); + + private final ReadWriteLock lock; + + public GlobalLockable(boolean fair) { + super(fair); + lock = new ReentrantReadWriteLock(fair); + } + + public GlobalLockable() { + this(false); + } @Override public void acquire(T key) { @@ -125,7 +151,7 @@ public class KeyedLock { public void release(T key) { KeyLock keyLock = threadLocal.get(); if (keyLock == null) { - throw new ElasticsearchIllegalStateException("Lock not acquired"); + throw new IllegalStateException("Lock not acquired"); } try { release(key, keyLock); diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java b/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java index f174d2e82b6..bff4ee613e1 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/SizeBlockingQueue.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.util.concurrent; -import org.elasticsearch.ElasticsearchIllegalStateException; import java.util.AbstractQueue; import java.util.Collection; @@ -146,13 +145,13 @@ public class SizeBlockingQueue extends AbstractQueue implements BlockingQu @Override public boolean offer(E e, long timeout, TimeUnit unit) throws InterruptedException { // note, not used in ThreadPoolExecutor - throw new ElasticsearchIllegalStateException("offer with timeout not allowed on size queue"); + throw new IllegalStateException("offer with timeout not allowed on size queue"); } @Override public void put(E e) throws InterruptedException { // note, not 
used in ThreadPoolExecutor - throw new ElasticsearchIllegalStateException("put not allowed on size queue"); + throw new IllegalStateException("put not allowed on size queue"); } @Override diff --git a/src/main/java/org/elasticsearch/common/util/concurrent/UncategorizedExecutionException.java b/src/main/java/org/elasticsearch/common/util/concurrent/UncategorizedExecutionException.java index a0e7c9e0742..89b71b60d57 100644 --- a/src/main/java/org/elasticsearch/common/util/concurrent/UncategorizedExecutionException.java +++ b/src/main/java/org/elasticsearch/common/util/concurrent/UncategorizedExecutionException.java @@ -26,10 +26,6 @@ import org.elasticsearch.ElasticsearchException; */ public class UncategorizedExecutionException extends ElasticsearchException { - public UncategorizedExecutionException(String msg) { - super(msg); - } - public UncategorizedExecutionException(String msg, Throwable cause) { super(msg, cause); } diff --git a/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java b/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java index 61cdc84f0b8..9ae1a03a67d 100644 --- a/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java +++ b/src/main/java/org/elasticsearch/common/xcontent/XContentFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.common.xcontent; import com.fasterxml.jackson.dataformat.cbor.CBORConstants; import com.fasterxml.jackson.dataformat.smile.SmileConstants; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -111,7 +110,7 @@ public class XContentFactory { } else if (type == XContentType.CBOR) { return cborBuilder(outputStream); } - throw new ElasticsearchIllegalArgumentException("No matching content type for " + type); + throw new IllegalArgumentException("No matching content type for " + type); } /** @@ -127,7 +126,7 @@ public class XContentFactory { } else if (type == XContentType.CBOR) { return CborXContent.contentBuilder(); } - throw new ElasticsearchIllegalArgumentException("No matching content type for " + type); + throw new IllegalArgumentException("No matching content type for " + type); } /** diff --git a/src/main/java/org/elasticsearch/common/lucene/search/NoCacheQuery.java b/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java similarity index 58% rename from src/main/java/org/elasticsearch/common/lucene/search/NoCacheQuery.java rename to src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java index c5bec8c5d9b..ade2a457797 100644 --- a/src/main/java/org/elasticsearch/common/lucene/search/NoCacheQuery.java +++ b/src/main/java/org/elasticsearch/common/xcontent/XContentLocation.java @@ -17,20 +17,21 @@ * under the License. */ -package org.elasticsearch.common.lucene.search; - -import org.apache.lucene.search.Query; +package org.elasticsearch.common.xcontent; /** - * Queries are never cached directly, but a query can be wrapped in a filter that may end being cached. - * Filters that wrap this query either directly or indirectly will never be cached. + * Simple data structure representing the line and column number of a position + * in some XContent e.g. JSON. Locations are typically used to communicate the + * position of a parsing error to end users and consequently have line and + * column numbers starting from 1. 
 */ -public abstract class NoCacheQuery extends Query { +public class XContentLocation { + public final int lineNumber; + public final int columnNumber; - @Override - public final String toString(String s) { - return "no_cache(" + innerToString(s) + ")"; + public XContentLocation(int lineNumber, int columnNumber) { + super(); + this.lineNumber = lineNumber; + this.columnNumber = columnNumber; } - - public abstract String innerToString(String s); } diff --git a/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java b/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java index 0aab32c4ba3..738fd9f6e72 100644 --- a/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java +++ b/src/main/java/org/elasticsearch/common/xcontent/XContentParser.java @@ -241,4 +241,12 @@ public interface XContentParser extends Releasable { * */ byte[] binaryValue() throws IOException; + + /** + * Used for error reporting to highlight where syntax errors occur in + * content being parsed. + * + * @return last token's location or null if it cannot be determined + */ + XContentLocation getTokenLocation(); } diff --git a/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java b/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java index 321b3bbe225..5d3a3f99f4e 100644 --- a/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java +++ b/src/main/java/org/elasticsearch/common/xcontent/json/JsonXContentParser.java @@ -19,11 +19,13 @@ package org.elasticsearch.common.xcontent.json; +import com.fasterxml.jackson.core.JsonLocation; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonToken; + import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchIllegalStateException; +import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.AbstractXContentParser; @@ -188,6 +190,15 @@ public class JsonXContentParser extends AbstractXContentParser { return parser.getBinaryValue(); } + @Override + public XContentLocation getTokenLocation() { + JsonLocation loc = parser.getTokenLocation(); + if (loc == null) { + return null; + } + return new XContentLocation(loc.getLineNr(), loc.getColumnNr()); + } + @Override public void close() { IOUtils.closeWhileHandlingException(parser); @@ -204,7 +215,7 @@ public class JsonXContentParser extends AbstractXContentParser { case DOUBLE: return NumberType.DOUBLE; } - throw new ElasticsearchIllegalStateException("No matching token for number_type [" + numberType + "]"); + throw new IllegalStateException("No matching token for number_type [" + numberType + "]"); } private Token convertToken(JsonToken token) { @@ -235,6 +246,6 @@ public class JsonXContentParser extends AbstractXContentParser { case VALUE_EMBEDDED_OBJECT: return Token.VALUE_EMBEDDED_OBJECT; } - throw new ElasticsearchIllegalStateException("No matching token for json_token [" + token + "]"); + throw new IllegalStateException("No matching token for json_token [" + token + "]"); } } diff --git a/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java b/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java index dbd11e1bc43..3a69f911fa0 100644 --- a/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java +++ b/src/main/java/org/elasticsearch/common/xcontent/support/XContentMapValues.java 
@@ -387,4 +387,23 @@ public class XContentMapValues { throw new ElasticsearchParseException(desc + " should be a hash but was of type: " + node.getClass()); } } + + /** + * Returns an array of string values from a node value. + * + * If the node represents an array, the corresponding array of strings is returned. + * Otherwise, the node is treated as a comma-separated string. + */ + public static String[] nodeStringArrayValue(Object node) { + if (isArray(node)) { + List list = (List) node; + String[] arr = new String[list.size()]; + for (int i = 0; i < arr.length; i++) { + arr[i] = nodeStringValue(list.get(i), null); + } + return arr; + } else { + return Strings.splitStringByCommaToArray(node.toString()); + } + } } diff --git a/src/main/java/org/elasticsearch/discovery/Discovery.java b/src/main/java/org/elasticsearch/discovery/Discovery.java index dfd51e6348f..36b8e5da6f5 100644 --- a/src/main/java/org/elasticsearch/discovery/Discovery.java +++ b/src/main/java/org/elasticsearch/discovery/Discovery.java @@ -19,6 +19,7 @@ package org.elasticsearch.discovery; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.allocation.AllocationService; @@ -59,7 +60,7 @@ public interface Discovery extends LifecycleComponent { * The {@link AckListener} allows to keep track of the ack received from nodes, and verify whether * they updated their own cluster state or not. */ - void publish(ClusterState clusterState, AckListener ackListener); + void publish(ClusterChangedEvent clusterChangedEvent, AckListener ackListener); public static interface AckListener { void onNodeAck(DiscoveryNode node, @Nullable Throwable t); diff --git a/src/main/java/org/elasticsearch/discovery/DiscoveryService.java b/src/main/java/org/elasticsearch/discovery/DiscoveryService.java index f73f2bbb593..a95c313447b 100644 --- a/src/main/java/org/elasticsearch/discovery/DiscoveryService.java +++ b/src/main/java/org/elasticsearch/discovery/DiscoveryService.java @@ -21,6 +21,7 @@ package org.elasticsearch.discovery; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -78,7 +79,7 @@ public class DiscoveryService extends AbstractLifecycleComponent implem private static final ConcurrentMap clusterGroups = ConcurrentCollections.newConcurrentMap(); + private volatile ClusterState lastProcessedClusterState; + @Inject public LocalDiscovery(Settings settings, ClusterName clusterName, TransportService transportService, ClusterService clusterService, DiscoveryNodeService discoveryNodeService, Version version, DiscoverySettings discoverySettings) { @@ -99,7 +104,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { synchronized (clusterGroups) { ClusterGroup clusterGroup = clusterGroups.get(clusterName); if (clusterGroup == null) { @@ -192,7 +197,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { synchronized (clusterGroups) { ClusterGroup clusterGroup = clusterGroups.get(clusterName); if (clusterGroup == null) { @@ 
-250,7 +255,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { } @Override @@ -274,9 +279,9 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem } @Override - public void publish(ClusterState clusterState, final Discovery.AckListener ackListener) { + public void publish(ClusterChangedEvent clusterChangedEvent, final Discovery.AckListener ackListener) { if (!master) { - throw new ElasticsearchIllegalStateException("Shouldn't publish state when not master"); + throw new IllegalStateException("Shouldn't publish state when not master"); } LocalDiscovery[] members = members(); if (members.length > 0) { @@ -287,7 +292,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem } nodesToPublishTo.add(localDiscovery.localNode); } - publish(members, clusterState, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); + publish(members, clusterChangedEvent, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); } } @@ -300,17 +305,47 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem return members.toArray(new LocalDiscovery[members.size()]); } - private void publish(LocalDiscovery[] members, ClusterState clusterState, final BlockingClusterStatePublishResponseHandler publishResponseHandler) { + private void publish(LocalDiscovery[] members, ClusterChangedEvent clusterChangedEvent, final BlockingClusterStatePublishResponseHandler publishResponseHandler) { try { // we do the marshaling intentionally, to check it works well... - final byte[] clusterStateBytes = Builder.toBytes(clusterState); + byte[] clusterStateBytes = null; + byte[] clusterStateDiffBytes = null; + ClusterState clusterState = clusterChangedEvent.state(); for (final LocalDiscovery discovery : members) { if (discovery.master) { continue; } - final ClusterState nodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode); + ClusterState newNodeSpecificClusterState = null; + synchronized (this) { + // we do the marshaling intentionally, to check it works well... 
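// The block below is the heart of this change: before falling back to the full
// cluster state, the master tries to ship only a delta to each node. A minimal
// sketch of the round trip, using the same calls the patch uses (variable names
// here are illustrative only):
//
//   Diff diff = newState.diff(previousState);         // master computes the delta once
//   BytesStreamOutput os = new BytesStreamOutput();
//   diff.writeTo(os);                                 // serialized once, reused for every node
//   ClusterState next = lastProcessed.readDiffFrom(new BytesStreamInput(os.bytes().toBytes()))
//                                    .apply(lastProcessed);  // receiver reconstructs the new state
//
// If the receiver's last processed state is not the state the diff was computed
// against, apply() fails with IncompatibleClusterStateVersionException and the
// code below falls back to resending the complete state.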
+ // check if we published cluster state at least once and node was in the cluster when we published cluster state the last time + if (discovery.lastProcessedClusterState != null && clusterChangedEvent.previousState().nodes().nodeExists(discovery.localNode.id())) { + // both conditions are true - which means we can try sending cluster state as diffs + if (clusterStateDiffBytes == null) { + Diff diff = clusterState.diff(clusterChangedEvent.previousState()); + BytesStreamOutput os = new BytesStreamOutput(); + diff.writeTo(os); + clusterStateDiffBytes = os.bytes().toBytes(); + } + try { + newNodeSpecificClusterState = discovery.lastProcessedClusterState.readDiffFrom(new BytesStreamInput(clusterStateDiffBytes)).apply(discovery.lastProcessedClusterState); + logger.debug("sending diff cluster state version with size {} to [{}]", clusterStateDiffBytes.length, discovery.localNode.getName()); + } catch (IncompatibleClusterStateVersionException ex) { + logger.warn("incompatible cluster state version - resending complete cluster state", ex); + } + } + if (newNodeSpecificClusterState == null) { + if (clusterStateBytes == null) { + clusterStateBytes = Builder.toBytes(clusterState); + } + newNodeSpecificClusterState = ClusterState.Builder.fromBytes(clusterStateBytes, discovery.localNode); + } + discovery.lastProcessedClusterState = newNodeSpecificClusterState; + } + final ClusterState nodeSpecificClusterState = newNodeSpecificClusterState; + nodeSpecificClusterState.status(ClusterState.ClusterStateStatus.RECEIVED); // ignore cluster state messages that do not include "me", not in the game yet... if (nodeSpecificClusterState.nodes().localNode() != null) { @@ -379,7 +414,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implem } catch (Exception e) { // failure to marshal or un-marshal - throw new ElasticsearchIllegalStateException("Cluster state failed to serialize", e); + throw new IllegalStateException("Cluster state failed to serialize", e); } } diff --git a/src/main/java/org/elasticsearch/discovery/zen/NotMasterException.java b/src/main/java/org/elasticsearch/discovery/zen/NotMasterException.java index 40f5f56dc80..d78d22aa983 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/NotMasterException.java +++ b/src/main/java/org/elasticsearch/discovery/zen/NotMasterException.java @@ -18,27 +18,18 @@ */ package org.elasticsearch.discovery.zen; -import org.elasticsearch.ElasticsearchIllegalStateException; /** * Thrown when a node join request or a master ping reaches a node which is not * currently acting as a master.
*/ -public class NotMasterException extends ElasticsearchIllegalStateException { - - public NotMasterException() { - super(null); - } +public class NotMasterException extends IllegalStateException { public NotMasterException(String msg) { super(msg); } - public NotMasterException(String msg, Throwable cause) { - super(msg, cause); - } - @Override public Throwable fillInStackTrace() { return null; diff --git a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java index b9279d7e832..5bec60abf04 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ZenDiscovery.java @@ -22,9 +22,6 @@ package org.elasticsearch.discovery.zen; import com.google.common.base.Objects; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -173,10 +170,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen this.rejoinOnMasterGone = settings.getAsBoolean(SETTING_REJOIN_ON_MASTER_GONE, true); if (this.joinRetryAttempts < 1) { - throw new ElasticsearchIllegalArgumentException("'" + SETTING_JOIN_RETRY_ATTEMPTS + "' must be a positive number. got [" + SETTING_JOIN_RETRY_ATTEMPTS + "]"); + throw new IllegalArgumentException("'" + SETTING_JOIN_RETRY_ATTEMPTS + "' must be a positive number. got [" + SETTING_JOIN_RETRY_ATTEMPTS + "]"); } if (this.maxPingsFromAnotherMaster < 1) { - throw new ElasticsearchIllegalArgumentException("'" + SETTING_MAX_PINGS_FROM_ANOTHER_MASTER + "' must be a positive number. got [" + this.maxPingsFromAnotherMaster + "]"); + throw new IllegalArgumentException("'" + SETTING_MAX_PINGS_FROM_ANOTHER_MASTER + "' must be a positive number. 
got [" + this.maxPingsFromAnotherMaster + "]"); } logger.debug("using ping.timeout [{}], join.timeout [{}], master_election.filter_client [{}], master_election.filter_data [{}]", pingTimeout, joinTimeout, masterElectionFilterClientNodes, masterElectionFilterDataNodes); @@ -195,7 +192,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen this.joinThreadControl = new JoinThreadControl(threadPool); - transportService.registerHandler(DISCOVERY_REJOIN_ACTION_NAME, new RejoinClusterRequestHandler()); + transportService.registerRequestHandler(DISCOVERY_REJOIN_ACTION_NAME, RejoinClusterRequest.class, ThreadPool.Names.SAME, new RejoinClusterRequestHandler()); dynamicSettings.addDynamicSetting(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, new Validator() { @Override @@ -227,7 +224,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { nodesFD.setLocalNode(clusterService.localNode()); joinThreadControl.start(); pingService.start(); @@ -249,7 +246,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { joinThreadControl.stop(); pingService.stop(); masterFD.stop("zen disco stop"); @@ -283,7 +280,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { masterFD.close(); nodesFD.close(); publishClusterState.close(); @@ -331,12 +328,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen @Override - public void publish(ClusterState clusterState, AckListener ackListener) { - if (!clusterState.getNodes().localNodeMaster()) { - throw new ElasticsearchIllegalStateException("Shouldn't publish state when not master"); + public void publish(ClusterChangedEvent clusterChangedEvent, AckListener ackListener) { + if (!clusterChangedEvent.state().getNodes().localNodeMaster()) { + throw new IllegalStateException("Shouldn't publish state when not master"); } - nodesFD.updateNodesAndPing(clusterState); - publishClusterState.publish(clusterState, ackListener); + nodesFD.updateNodesAndPing(clusterChangedEvent.state()); + publishClusterState.publish(clusterChangedEvent, ackListener); } /** @@ -692,12 +689,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen static class ProcessClusterState { final ClusterState clusterState; - final PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed; volatile boolean processed; - ProcessClusterState(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + ProcessClusterState(ClusterState clusterState) { this.clusterState = clusterState; - this.newStateProcessed = newStateProcessed; } } @@ -708,7 +703,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen /* The cluster name can still be null if the state comes from a node that is prev 1.1.1*/ if (incomingClusterName != null && !incomingClusterName.equals(this.clusterName)) { logger.warn("received cluster state from [{}] which is also master but with a different cluster name [{}]", newClusterState.nodes().masterNode(), incomingClusterName); - newStateProcessed.onNewClusterStateFailed(new ElasticsearchIllegalStateException("received state from a node that is not part of the cluster")); + 
newStateProcessed.onNewClusterStateFailed(new IllegalStateException("received state from a node that is not part of the cluster")); return; } if (localNodeMaster()) { @@ -735,10 +730,10 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } else { if (newClusterState.nodes().localNode() == null) { logger.warn("received a cluster state from [{}] and not part of the cluster, should not happen", newClusterState.nodes().masterNode()); - newStateProcessed.onNewClusterStateFailed(new ElasticsearchIllegalStateException("received state from a node that is not part of the cluster")); + newStateProcessed.onNewClusterStateFailed(new IllegalStateException("received state from a node that is not part of the cluster")); } else { - final ProcessClusterState processClusterState = new ProcessClusterState(newClusterState, newStateProcessed); + final ProcessClusterState processClusterState = new ProcessClusterState(newClusterState); processNewClusterStates.add(processClusterState); assert newClusterState.nodes().masterNode() != null : "received a cluster state without a master"; @@ -881,7 +876,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } if (!currentState.nodes().masterNodeId().equals(newClusterState.nodes().masterNodeId())) { logger.warn("received a cluster state from a different master then the current one, rejecting (received {}, current {})", newClusterState.nodes().masterNode(), currentState.nodes().masterNode()); - throw new ElasticsearchIllegalStateException("cluster state from a different master then the current one, rejecting (received " + newClusterState.nodes().masterNode() + ", current " + currentState.nodes().masterNode() + ")"); + throw new IllegalStateException("cluster state from a different master than the current one, rejecting (received " + newClusterState.nodes().masterNode() + ", current " + currentState.nodes().masterNode() + ")"); } else if (newClusterState.version() < currentState.version()) { // if the new state has a smaller version, and it has the same master node, then no need to process it logger.debug("received a cluster state that has a lower version than the current one, ignoring (received {}, current {})", newClusterState.version(), currentState.version()); @@ -1242,13 +1237,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } } - class RejoinClusterRequestHandler extends BaseTransportRequestHandler { - - @Override - public RejoinClusterRequest newInstance() { - return new RejoinClusterRequest(); - } - + class RejoinClusterRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final RejoinClusterRequest request, final TransportChannel channel) throws Exception { clusterService.submitStateUpdateTask("received a request to rejoin the cluster from [" + request.fromNodeId + "]", Priority.IMMEDIATE, new ClusterStateNonMasterUpdateTask() { @@ -1273,11 +1262,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implemen } }); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } class ApplySettings implements NodeSettingsService.Listener { diff --git a/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java b/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java index 867866b8b2b..1159f1c4e6a 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java +++ b/src/main/java/org/elasticsearch/discovery/zen/fd/MasterFaultDetection.java @@ -20,7 +20,6 @@ package
org.elasticsearch.discovery.zen.fd; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterService; @@ -77,7 +76,7 @@ public class MasterFaultDetection extends FaultDetection { logger.debug("[master] uses ping_interval [{}], ping_timeout [{}], ping_retries [{}]", pingInterval, pingRetryTimeout, pingRetryCount); - transportService.registerHandler(MASTER_PING_ACTION_NAME, new MasterPingRequestHandler()); + transportService.registerRequestHandler(MASTER_PING_ACTION_NAME, MasterPingRequest.class, ThreadPool.Names.SAME, new MasterPingRequestHandler()); } public DiscoveryNode masterNode() { @@ -295,7 +294,7 @@ public class MasterFaultDetection extends FaultDetection { } /** Thrown when a ping reaches the wrong node */ - static class ThisIsNotTheMasterYouAreLookingForException extends ElasticsearchIllegalStateException { + static class ThisIsNotTheMasterYouAreLookingForException extends IllegalStateException { ThisIsNotTheMasterYouAreLookingForException(String msg) { super(msg); @@ -310,19 +309,14 @@ public class MasterFaultDetection extends FaultDetection { } } - static class NodeDoesNotExistOnMasterException extends ElasticsearchIllegalStateException { + static class NodeDoesNotExistOnMasterException extends IllegalStateException { @Override public Throwable fillInStackTrace() { return null; } } - private class MasterPingRequestHandler extends BaseTransportRequestHandler { - - @Override - public MasterPingRequest newInstance() { - return new MasterPingRequest(); - } + private class MasterPingRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final MasterPingRequest request, final TransportChannel channel) throws Exception { @@ -356,7 +350,7 @@ public class MasterFaultDetection extends FaultDetection { // if we are no longer master, fail... 
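// Note the exception style used by this fault-detection code: NotMasterException,
// ThisIsNotTheMasterYouAreLookingForException and NodeDoesNotExistOnMasterException
// all override fillInStackTrace() to return null, since they signal expected
// conditions over the transport layer and capturing a stack trace would add cost
// without information. A minimal sketch of the idiom, with a hypothetical name:
//
//   static class CheapStateException extends IllegalStateException {
//       CheapStateException(String msg) {
//           super(msg);
//       }
//
//       @Override
//       public Throwable fillInStackTrace() {
//           return null; // skip the expensive stack capture; this is control flow
//       }
//   }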
DiscoveryNodes nodes = currentState.nodes(); if (!nodes.localNodeMaster()) { - throw new NotMasterException(); + throw new NotMasterException("local node is not master"); } if (!nodes.nodeExists(request.nodeId)) { throw new NodeDoesNotExistOnMasterException(); @@ -390,11 +384,6 @@ public class MasterFaultDetection extends FaultDetection { channel.sendResponse(new MasterPingResponseResponse()); } } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } diff --git a/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java b/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java index 8cbb863dd80..41a524b2629 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java +++ b/src/main/java/org/elasticsearch/discovery/zen/fd/NodesFaultDetection.java @@ -19,7 +19,6 @@ package org.elasticsearch.discovery.zen.fd; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -65,7 +64,7 @@ public class NodesFaultDetection extends FaultDetection { logger.debug("[node ] uses ping_interval [{}], ping_timeout [{}], ping_retries [{}]", pingInterval, pingRetryTimeout, pingRetryCount); - transportService.registerHandler(PING_ACTION_NAME, new PingRequestHandler()); + transportService.registerRequestHandler(PING_ACTION_NAME, PingRequest.class, ThreadPool.Names.SAME, new PingRequestHandler()); } public void setLocalNode(DiscoveryNode localNode) { @@ -239,36 +238,25 @@ public class NodesFaultDetection extends FaultDetection { } } - class PingRequestHandler extends BaseTransportRequestHandler { - - @Override - public PingRequest newInstance() { - return new PingRequest(); - } - + class PingRequestHandler implements TransportRequestHandler { @Override public void messageReceived(PingRequest request, TransportChannel channel) throws Exception { // if we are not the node we are supposed to be pinged, send an exception // this can happen when a kill -9 is sent, and another node is started using the same port if (!localNode.id().equals(request.nodeId)) { - throw new ElasticsearchIllegalStateException("Got pinged as node [" + request.nodeId + "], but I am node [" + localNode.id() + "]"); + throw new IllegalStateException("Got pinged as node [" + request.nodeId + "], but I am node [" + localNode.id() + "]"); } // PingRequest will have clusterName set to null if it came from a node of version <1.4.0 if (request.clusterName != null && !request.clusterName.equals(clusterName)) { // Don't introduce new exception for bwc reasons - throw new ElasticsearchIllegalStateException("Got pinged with cluster name [" + request.clusterName + "], but I'm part of cluster [" + clusterName + "]"); + throw new IllegalStateException("Got pinged with cluster name [" + request.clusterName + "], but I'm part of cluster [" + clusterName + "]"); } notifyPingReceived(request); channel.sendResponse(new PingResponse()); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } diff --git a/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java b/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java index 36fcde3ddcc..7a7567ea3b4 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java +++ b/src/main/java/org/elasticsearch/discovery/zen/membership/MembershipAction.java @@ -72,9 +72,9 @@ public class 
MembershipAction extends AbstractComponent { this.listener = listener; this.clusterService = clusterService; - transportService.registerHandler(DISCOVERY_JOIN_ACTION_NAME, new JoinRequestRequestHandler()); - transportService.registerHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME, new ValidateJoinRequestRequestHandler()); - transportService.registerHandler(DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequestRequestHandler()); + transportService.registerRequestHandler(DISCOVERY_JOIN_ACTION_NAME, JoinRequest.class, ThreadPool.Names.GENERIC, new JoinRequestRequestHandler()); + transportService.registerRequestHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME, ValidateJoinRequest.class, ThreadPool.Names.GENERIC, new ValidateJoinRequestRequestHandler()); + transportService.registerRequestHandler(DISCOVERY_LEAVE_ACTION_NAME, LeaveRequest.class, ThreadPool.Names.GENERIC, new LeaveRequestRequestHandler()); } public void close() { @@ -87,7 +87,7 @@ public class MembershipAction extends AbstractComponent { transportService.sendRequest(node, DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequest(masterNode), EmptyTransportResponseHandler.INSTANCE_SAME); } - public void sendLeaveRequestBlocking(DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout) throws ElasticsearchException { + public void sendLeaveRequestBlocking(DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout) { transportService.submitRequest(masterNode, DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME).txGet(timeout.millis(), TimeUnit.MILLISECONDS); } @@ -95,7 +95,7 @@ public class MembershipAction extends AbstractComponent { transportService.sendRequest(masterNode, DISCOVERY_JOIN_ACTION_NAME, new JoinRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME); } - public void sendJoinRequestBlocking(DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout) throws ElasticsearchException { + public void sendJoinRequestBlocking(DiscoveryNode masterNode, DiscoveryNode node, TimeValue timeout) { transportService.submitRequest(masterNode, DISCOVERY_JOIN_ACTION_NAME, new JoinRequest(node), EmptyTransportResponseHandler.INSTANCE_SAME) .txGet(timeout.millis(), TimeUnit.MILLISECONDS); } @@ -103,7 +103,7 @@ public class MembershipAction extends AbstractComponent { /** * Validates the join request, throwing a failure if it failed. 
*/ - public void sendValidateJoinRequestBlocking(DiscoveryNode node, TimeValue timeout) throws ElasticsearchException { + public void sendValidateJoinRequestBlocking(DiscoveryNode node, TimeValue timeout) { transportService.submitRequest(node, DISCOVERY_JOIN_VALIDATE_ACTION_NAME, new ValidateJoinRequest(), EmptyTransportResponseHandler.INSTANCE_SAME) .txGet(timeout.millis(), TimeUnit.MILLISECONDS); } @@ -133,12 +133,7 @@ public class MembershipAction extends AbstractComponent { } - private class JoinRequestRequestHandler extends BaseTransportRequestHandler { - - @Override - public JoinRequest newInstance() { - return new JoinRequest(); - } + private class JoinRequestRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final JoinRequest request, final TransportChannel channel) throws Exception { @@ -162,36 +157,21 @@ public class MembershipAction extends AbstractComponent { } }); } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } } - class ValidateJoinRequest extends TransportRequest { + static class ValidateJoinRequest extends TransportRequest { ValidateJoinRequest() { } } - private class ValidateJoinRequestRequestHandler extends BaseTransportRequestHandler { - - @Override - public ValidateJoinRequest newInstance() { - return new ValidateJoinRequest(); - } + class ValidateJoinRequestRequestHandler implements TransportRequestHandler { @Override public void messageReceived(ValidateJoinRequest request, TransportChannel channel) throws Exception { // for now, the mere fact that we can serialize the cluster state acts as validation.... channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } } static class LeaveRequest extends TransportRequest { @@ -218,22 +198,12 @@ public class MembershipAction extends AbstractComponent { } } - private class LeaveRequestRequestHandler extends BaseTransportRequestHandler { - - @Override - public LeaveRequest newInstance() { - return new LeaveRequest(); - } + private class LeaveRequestRequestHandler implements TransportRequestHandler { @Override public void messageReceived(LeaveRequest request, TransportChannel channel) throws Exception { listener.onLeave(request.node); channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } } } diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java b/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java index 370620a9b7b..18f734f7136 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPing.java @@ -43,7 +43,7 @@ public interface ZenPing extends LifecycleComponent { void setPingContextProvider(PingContextProvider contextProvider); - void ping(PingListener listener, TimeValue timeout) throws ElasticsearchException; + void ping(PingListener listener, TimeValue timeout); public interface PingListener { diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java b/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java index 18b6899c02d..b7bd539c2f7 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ping/ZenPingService.java @@ -21,7 +21,6 @@ package org.elasticsearch.discovery.zen.ping; import com.google.common.collect.ImmutableList; import 
org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.Nullable; @@ -71,7 +70,7 @@ public class ZenPingService extends AbstractLifecycleComponent implemen @Override public void setPingContextProvider(PingContextProvider contextProvider) { if (lifecycle.started()) { - throw new ElasticsearchIllegalStateException("Can't set nodes provider when started"); + throw new IllegalStateException("Can't set nodes provider when started"); } for (ZenPing zenPing : zenPings) { zenPing.setPingContextProvider(contextProvider); @@ -79,21 +78,21 @@ public class ZenPingService extends AbstractLifecycleComponent implemen } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { for (ZenPing zenPing : zenPings) { zenPing.start(); } } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { for (ZenPing zenPing : zenPings) { zenPing.stop(); } } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { for (ZenPing zenPing : zenPings) { zenPing.close(); } @@ -119,7 +118,7 @@ public class ZenPingService extends AbstractLifecycleComponent implemen } @Override - public void ping(PingListener listener, TimeValue timeout) throws ElasticsearchException { + public void ping(PingListener listener, TimeValue timeout) { ImmutableList zenPings = this.zenPings; CompoundPingListener compoundPingListener = new CompoundPingListener(listener, zenPings); for (ZenPing zenPing : zenPings) { diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java b/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java index de56bdc3730..0d5ca7260b4 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ping/multicast/MulticastZenPing.java @@ -21,7 +21,6 @@ package org.elasticsearch.discovery.zen.ping.multicast; import org.apache.lucene.util.Constants; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; @@ -107,19 +106,19 @@ public class MulticastZenPing extends AbstractLifecycleComponent implem logger.debug("using group [{}], with port [{}], ttl [{}], and address [{}]", group, port, ttl, address); - this.transportService.registerHandler(ACTION_NAME, new MulticastPingResponseRequestHandler()); + this.transportService.registerRequestHandler(ACTION_NAME, MulticastPingResponse.class, ThreadPool.Names.SAME, new MulticastPingResponseRequestHandler()); } @Override public void setPingContextProvider(PingContextProvider nodesProvider) { if (lifecycle.started()) { - throw new ElasticsearchIllegalStateException("Can't set nodes provider when started"); + throw new IllegalStateException("Can't set nodes provider when started"); } this.contextProvider = nodesProvider; } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { try { // we know OSX has bugs in the JVM when creating multiple instances of multicast sockets // causing for "socket close" exceptions when receive and/or crashes @@ -138,7 +137,7 @@ public class MulticastZenPing extends AbstractLifecycleComponent implem } @Override - protected void 
doStop() throws ElasticsearchException { + protected void doStop() { if (multicastChannel != null) { multicastChannel.close(); multicastChannel = null; @@ -146,7 +145,7 @@ public class MulticastZenPing extends AbstractLifecycleComponent implem } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { } public PingResponse[] pingAndWait(TimeValue timeout) { @@ -326,13 +325,7 @@ public class MulticastZenPing extends AbstractLifecycleComponent implem } } - class MulticastPingResponseRequestHandler extends BaseTransportRequestHandler { - - @Override - public MulticastPingResponse newInstance() { - return new MulticastPingResponse(); - } - + class MulticastPingResponseRequestHandler implements TransportRequestHandler { @Override public void messageReceived(MulticastPingResponse request, TransportChannel channel) throws Exception { if (logger.isTraceEnabled()) { @@ -346,11 +339,6 @@ public class MulticastZenPing extends AbstractLifecycleComponent implem } channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } static class MulticastPingResponse extends TransportRequest { @@ -417,7 +405,7 @@ public class MulticastZenPing extends AbstractLifecycleComponent implem .createParser(data) .mapAndClose(); } else { - throw new ElasticsearchIllegalStateException("failed multicast message, probably message from previous version"); + throw new IllegalStateException("failed multicast message, probably message from previous version"); } } if (externalPingData != null) { diff --git a/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java b/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java index 5fcba119281..f9cf98f86fa 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java +++ b/src/main/java/org/elasticsearch/discovery/zen/ping/unicast/UnicastZenPing.java @@ -23,8 +23,6 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.google.common.collect.Lists; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -130,27 +128,27 @@ public class UnicastZenPing extends AbstractLifecycleComponent implemen configuredTargetNodes.add(new DiscoveryNode(UNICAST_NODE_PREFIX + unicastNodeIdGenerator.incrementAndGet() + "#", addresses[i], version.minimumCompatibilityVersion())); } } catch (Exception e) { - throw new ElasticsearchIllegalArgumentException("Failed to resolve address for [" + host + "]", e); + throw new IllegalArgumentException("Failed to resolve address for [" + host + "]", e); } } this.configuredTargetNodes = configuredTargetNodes.toArray(new DiscoveryNode[configuredTargetNodes.size()]); - transportService.registerHandler(ACTION_NAME, new UnicastPingRequestHandler()); + transportService.registerRequestHandler(ACTION_NAME, UnicastPingRequest.class, ThreadPool.Names.SAME, new UnicastPingRequestHandler()); ThreadFactory threadFactory = EsExecutors.daemonThreadFactory(settings, "[unicast_connect]"); unicastConnectExecutor = EsExecutors.newScaling(0, concurrentConnects, 60, TimeUnit.SECONDS, threadFactory); } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { } 
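// The same handler refactoring recurs in every class touched above: instead of
// extending BaseTransportRequestHandler and overriding newInstance() and
// executor(), a handler now implements TransportRequestHandler and declares its
// request class and thread pool at registration time. A minimal sketch of the
// new shape, assuming a hypothetical EchoRequest type:
//
//   transportService.registerRequestHandler(ACTION_NAME, EchoRequest.class,
//           ThreadPool.Names.SAME, new TransportRequestHandler<EchoRequest>() {
//       @Override
//       public void messageReceived(EchoRequest request, TransportChannel channel) throws Exception {
//           channel.sendResponse(TransportResponse.Empty.INSTANCE); // ack and return
//       }
//   });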
@Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { transportService.removeHandler(ACTION_NAME); ThreadPool.terminate(unicastConnectExecutor, 0, TimeUnit.SECONDS); try { @@ -200,7 +198,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implemen } @Override - public void ping(final PingListener listener, final TimeValue timeout) throws ElasticsearchException { + public void ping(final PingListener listener, final TimeValue timeout) { final SendPingsHandler sendPingsHandler = new SendPingsHandler(pingHandlerIdGenerator.incrementAndGet()); try { receivedResponses.put(sendPingsHandler.id(), sendPingsHandler); @@ -220,11 +218,11 @@ public class UnicastZenPing extends AbstractLifecycleComponent implemen protected void doRun() throws Exception { sendPings(timeout, TimeValue.timeValueMillis(timeout.millis() / 2), sendPingsHandler); sendPingsHandler.close(); + listener.onPing(sendPingsHandler.pingCollection().toArray()); for (DiscoveryNode node : sendPingsHandler.nodeToDisconnect) { logger.trace("[{}] disconnecting from {}", sendPingsHandler.id(), node); transportService.disconnectFromNode(node); } - listener.onPing(sendPingsHandler.pingCollection().toArray()); } @Override @@ -462,7 +460,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implemen private UnicastPingResponse handlePingRequest(final UnicastPingRequest request) { if (!lifecycle.started()) { - throw new ElasticsearchIllegalStateException("received ping request while not started"); + throw new IllegalStateException("received ping request while not started"); } temporalResponses.add(request.pingResponse); threadPool.schedule(TimeValue.timeValueMillis(request.timeout.millis() * 2), ThreadPool.Names.SAME, new Runnable() { @@ -483,17 +481,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implemen return unicastPingResponse; } - class UnicastPingRequestHandler extends BaseTransportRequestHandler { - - @Override - public UnicastPingRequest newInstance() { - return new UnicastPingRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } + class UnicastPingRequestHandler implements TransportRequestHandler { @Override public void messageReceived(UnicastPingRequest request, TransportChannel channel) throws Exception { @@ -504,9 +492,7 @@ public class UnicastZenPing extends AbstractLifecycleComponent implemen static class UnicastPingRequest extends TransportRequest { int id; - TimeValue timeout; - PingResponse pingResponse; UnicastPingRequest() { diff --git a/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java b/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java index e8352f389c5..c4ad8895e79 100644 --- a/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java +++ b/src/main/java/org/elasticsearch/discovery/zen/publish/PublishClusterStateAction.java @@ -21,8 +21,12 @@ package org.elasticsearch.discovery.zen.publish; import com.google.common.collect.Maps; import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.IncompatibleClusterStateVersionException; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.Nullable; import 
org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.compress.Compressor; @@ -40,10 +44,13 @@ import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.*; +import java.io.IOException; import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; /** * @@ -52,9 +59,9 @@ public class PublishClusterStateAction extends AbstractComponent { public static final String ACTION_NAME = "internal:discovery/zen/publish"; - public static interface NewClusterStateListener { + public interface NewClusterStateListener { - static interface NewStateProcessed { + interface NewStateProcessed { void onNewClusterStateProcessed(); @@ -76,80 +83,50 @@ public class PublishClusterStateAction extends AbstractComponent { this.nodesProvider = nodesProvider; this.listener = listener; this.discoverySettings = discoverySettings; - transportService.registerHandler(ACTION_NAME, new PublishClusterStateRequestHandler()); + transportService.registerRequestHandler(ACTION_NAME, BytesTransportRequest.class, ThreadPool.Names.SAME, new PublishClusterStateRequestHandler()); } public void close() { transportService.removeHandler(ACTION_NAME); } - public void publish(ClusterState clusterState, final Discovery.AckListener ackListener) { - Set nodesToPublishTo = new HashSet<>(clusterState.nodes().size()); + public void publish(ClusterChangedEvent clusterChangedEvent, final Discovery.AckListener ackListener) { + Set nodesToPublishTo = new HashSet<>(clusterChangedEvent.state().nodes().size()); DiscoveryNode localNode = nodesProvider.nodes().localNode(); - for (final DiscoveryNode node : clusterState.nodes()) { + for (final DiscoveryNode node : clusterChangedEvent.state().nodes()) { if (node.equals(localNode)) { continue; } nodesToPublishTo.add(node); } - publish(clusterState, nodesToPublishTo, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); + publish(clusterChangedEvent, nodesToPublishTo, new AckClusterStatePublishResponseHandler(nodesToPublishTo, ackListener)); } - private void publish(final ClusterState clusterState, final Set nodesToPublishTo, + private void publish(final ClusterChangedEvent clusterChangedEvent, final Set nodesToPublishTo, final BlockingClusterStatePublishResponseHandler publishResponseHandler) { Map serializedStates = Maps.newHashMap(); + Map serializedDiffs = Maps.newHashMap(); + final ClusterState clusterState = clusterChangedEvent.state(); + final ClusterState previousState = clusterChangedEvent.previousState(); final AtomicBoolean timedOutWaitingForNodes = new AtomicBoolean(false); final TimeValue publishTimeout = discoverySettings.getPublishTimeout(); + final boolean sendFullVersion = !discoverySettings.getPublishDiff() || previousState == null; + Diff diff = null; for (final DiscoveryNode node : nodesToPublishTo) { // try and serialize the cluster state once (or per version), so we don't serialize it // per node when we send it over the wire, compress it while we are at it... 
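// Framing note for the refactored loop below: full states and diffs now share the
// same publish action, and the receiver tells them apart by a leading boolean in
// the compressed payload (see serializeFullClusterState and serializeDiffClusterState
// later in this file). A minimal sketch of that framing:
//
//   BytesStreamOutput bStream = new BytesStreamOutput();
//   StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream);
//   stream.setVersion(node.version()); // serialize for the receiving node's wire version
//   stream.writeBoolean(true);         // true = full cluster state, false = diff
//   clusterState.writeTo(stream);      // or diff.writeTo(stream) for a delta
//   stream.close();
//   BytesReference bytes = bStream.bytes();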
- BytesReference bytes = serializedStates.get(node.version()); - if (bytes == null) { - try { - BytesStreamOutput bStream = new BytesStreamOutput(); - StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream); - stream.setVersion(node.version()); - ClusterState.Builder.writeTo(clusterState, stream); - stream.close(); - bytes = bStream.bytes(); - serializedStates.put(node.version(), bytes); - } catch (Throwable e) { - logger.warn("failed to serialize cluster_state before publishing it to node {}", e, node); - publishResponseHandler.onFailure(node, e); - continue; + // we don't send full version if node didn't exist in the previous version of cluster state + if (sendFullVersion || !previousState.nodes().nodeExists(node.id())) { + sendFullClusterState(clusterState, serializedStates, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler); + } else { + if (diff == null) { + diff = clusterState.diff(previousState); } - } - try { - TransportRequestOptions options = TransportRequestOptions.options().withType(TransportRequestOptions.Type.STATE).withCompress(false); - // no need to put a timeout on the options here, because we want the response to eventually be received - // and not log an error if it arrives after the timeout - transportService.sendRequest(node, ACTION_NAME, - new BytesTransportRequest(bytes, node.version()), - options, // no need to compress, we already compressed the bytes - - new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { - - @Override - public void handleResponse(TransportResponse.Empty response) { - if (timedOutWaitingForNodes.get()) { - logger.debug("node {} responded for cluster state [{}] (took longer than [{}])", node, clusterState.version(), publishTimeout); - } - publishResponseHandler.onResponse(node); - } - - @Override - public void handleException(TransportException exp) { - logger.debug("failed to send cluster state to {}", exp, node); - publishResponseHandler.onFailure(node, exp); - } - }); - } catch (Throwable t) { - logger.debug("error sending cluster state to {}", t, node); - publishResponseHandler.onFailure(node, t); + sendClusterStateDiff(clusterState, diff, serializedDiffs, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler); } } @@ -171,12 +148,107 @@ public class PublishClusterStateAction extends AbstractComponent { } } - private class PublishClusterStateRequestHandler extends BaseTransportRequestHandler { - - @Override - public BytesTransportRequest newInstance() { - return new BytesTransportRequest(); + private void sendFullClusterState(ClusterState clusterState, @Nullable Map serializedStates, + DiscoveryNode node, AtomicBoolean timedOutWaitingForNodes, TimeValue publishTimeout, + BlockingClusterStatePublishResponseHandler publishResponseHandler) { + BytesReference bytes = null; + if (serializedStates != null) { + bytes = serializedStates.get(node.version()); } + if (bytes == null) { + try { + bytes = serializeFullClusterState(clusterState, node.version()); + if (serializedStates != null) { + serializedStates.put(node.version(), bytes); + } + } catch (Throwable e) { + logger.warn("failed to serialize cluster_state before publishing it to node {}", e, node); + publishResponseHandler.onFailure(node, e); + return; + } + } + publishClusterStateToNode(clusterState, bytes, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler, false); + } + + private void sendClusterStateDiff(ClusterState clusterState, Diff diff, Map serializedDiffs, DiscoveryNode node, + AtomicBoolean 
timedOutWaitingForNodes, TimeValue publishTimeout, + BlockingClusterStatePublishResponseHandler publishResponseHandler) { + BytesReference bytes = serializedDiffs.get(node.version()); + if (bytes == null) { + try { + bytes = serializeDiffClusterState(diff, node.version()); + serializedDiffs.put(node.version(), bytes); + } catch (Throwable e) { + logger.warn("failed to serialize diff of cluster_state before publishing it to node {}", e, node); + publishResponseHandler.onFailure(node, e); + return; + } + } + publishClusterStateToNode(clusterState, bytes, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler, true); + } + + private void publishClusterStateToNode(final ClusterState clusterState, BytesReference bytes, + final DiscoveryNode node, final AtomicBoolean timedOutWaitingForNodes, + final TimeValue publishTimeout, + final BlockingClusterStatePublishResponseHandler publishResponseHandler, + final boolean sendDiffs) { + try { + TransportRequestOptions options = TransportRequestOptions.options().withType(TransportRequestOptions.Type.STATE).withCompress(false); + // no need to put a timeout on the options here, because we want the response to eventually be received + // and not log an error if it arrives after the timeout + transportService.sendRequest(node, ACTION_NAME, + new BytesTransportRequest(bytes, node.version()), + options, // no need to compress, we already compressed the bytes + + new EmptyTransportResponseHandler(ThreadPool.Names.SAME) { + + @Override + public void handleResponse(TransportResponse.Empty response) { + if (timedOutWaitingForNodes.get()) { + logger.debug("node {} responded for cluster state [{}] (took longer than [{}])", node, clusterState.version(), publishTimeout); + } + publishResponseHandler.onResponse(node); + } + + @Override + public void handleException(TransportException exp) { + if (sendDiffs && exp.unwrapCause() instanceof IncompatibleClusterStateVersionException) { + logger.debug("resending full cluster state to node {} reason {}", node, exp.getDetailedMessage()); + sendFullClusterState(clusterState, null, node, timedOutWaitingForNodes, publishTimeout, publishResponseHandler); + } else { + logger.debug("failed to send cluster state to {}", exp, node); + publishResponseHandler.onFailure(node, exp); + } + } + }); + } catch (Throwable t) { + logger.warn("error sending cluster state to {}", t, node); + publishResponseHandler.onFailure(node, t); + } + } + + public static BytesReference serializeFullClusterState(ClusterState clusterState, Version nodeVersion) throws IOException { + BytesStreamOutput bStream = new BytesStreamOutput(); + StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream); + stream.setVersion(nodeVersion); + stream.writeBoolean(true); + clusterState.writeTo(stream); + stream.close(); + return bStream.bytes(); + } + + public static BytesReference serializeDiffClusterState(Diff diff, Version nodeVersion) throws IOException { + BytesStreamOutput bStream = new BytesStreamOutput(); + StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream); + stream.setVersion(nodeVersion); + stream.writeBoolean(false); + diff.writeTo(stream); + stream.close(); + return bStream.bytes(); + } + + private class PublishClusterStateRequestHandler implements TransportRequestHandler { + private ClusterState lastSeenClusterState; @Override public void messageReceived(BytesTransportRequest request, final TransportChannel channel) throws Exception { @@ -188,11 +260,24 @@ public class 
PublishClusterStateAction extends AbstractComponent { in = request.bytes().streamInput(); } in.setVersion(request.version()); - ClusterState clusterState = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode()); - clusterState.status(ClusterState.ClusterStateStatus.RECEIVED); - logger.debug("received cluster state version {}", clusterState.version()); + synchronized (this) { + // If true we received full cluster state - otherwise diffs + if (in.readBoolean()) { + lastSeenClusterState = ClusterState.Builder.readFrom(in, nodesProvider.nodes().localNode()); + logger.debug("received full cluster state version {} with size {}", lastSeenClusterState.version(), request.bytes().length()); + } else if (lastSeenClusterState != null) { + Diff diff = lastSeenClusterState.readDiffFrom(in); + lastSeenClusterState = diff.apply(lastSeenClusterState); + logger.debug("received diff cluster state version {} with uuid {}, diff size {}", lastSeenClusterState.version(), lastSeenClusterState.uuid(), request.bytes().length()); + } else { + logger.debug("received diff but don't have any local cluster state - requesting full state"); + throw new IncompatibleClusterStateVersionException("have no local cluster state"); + } + lastSeenClusterState.status(ClusterState.ClusterStateStatus.RECEIVED); + } + try { - listener.onNewClusterState(clusterState, new NewClusterStateListener.NewStateProcessed() { + listener.onNewClusterState(lastSeenClusterState, new NewClusterStateListener.NewStateProcessed() { @Override public void onNewClusterStateProcessed() { try { @@ -212,7 +297,7 @@ public class PublishClusterStateAction extends AbstractComponent { } }); } catch (Exception e) { - logger.warn("unexpected error while processing cluster state version [{}]", e, clusterState.version()); + logger.warn("unexpected error while processing cluster state version [{}]", e, lastSeenClusterState.version()); try { channel.sendResponse(e); } catch (Throwable e1) { @@ -220,10 +305,5 @@ public class PublishClusterStateAction extends AbstractComponent { } } } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } } diff --git a/src/main/java/org/elasticsearch/env/ESFileStore.java b/src/main/java/org/elasticsearch/env/ESFileStore.java new file mode 100644 index 00000000000..d8ffcfedc15 --- /dev/null +++ b/src/main/java/org/elasticsearch/env/ESFileStore.java @@ -0,0 +1,172 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.env; + +import org.apache.lucene.util.Constants; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.common.io.PathUtils; + +import java.io.IOException; +import java.nio.file.FileStore; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.attribute.FileAttributeView; +import java.nio.file.attribute.FileStoreAttributeView; + +/** + * Implementation of FileStore that supports + * additional features, such as SSD detection and better + * filesystem information for the root filesystem. + * @see Environment#getFileStore(Path) + */ +class ESFileStore extends FileStore { + /** Underlying filestore */ + final FileStore in; + /** Cached result of Lucene's {@code IOUtils.spins} on path. */ + final Boolean spins; + + ESFileStore(FileStore in) { + this.in = in; + Boolean spins; + // Lucene's IOUtils.spins only works on Linux today: + if (Constants.LINUX) { + try { + spins = IOUtils.spins(PathUtils.get(getMountPointLinux(in))); + } catch (Exception e) { + spins = null; + } + } else { + spins = null; + } + this.spins = spins; + } + + // these are hacks that are not guaranteed + private static String getMountPointLinux(FileStore store) { + String desc = store.toString(); + int index = desc.lastIndexOf(" ("); + if (index != -1) { + return desc.substring(0, index); + } else { + return desc; + } + } + + /** Files.getFileStore(Path) useless here! Don't complain, just try it yourself. */ + static FileStore getMatchingFileStore(Path path, FileStore fileStores[]) throws IOException { + FileStore store = Files.getFileStore(path); + + if (Constants.WINDOWS) { + return store; // be defensive, don't even try to do anything fancy. + } + + try { + String mount = getMountPointLinux(store); + FileStore sameMountPoint = null; + for (FileStore fs : fileStores) { + if (mount.equals(getMountPointLinux(fs))) { + if (sameMountPoint == null) { + sameMountPoint = fs; + } else { + // more than one filesystem has the same mount point; something is wrong! 
+ // fall back to crappy one we got from Files.getFileStore + return store; + } + } + } + + if (sameMountPoint != null) { + // ok, we found only one, use it: + return sameMountPoint; + } else { + // fall back to crappy one we got from Files.getFileStore + return store; + } + } catch (Exception e) { + // ignore + } + + // fall back to crappy one we got from Files.getFileStore + return store; + } + + @Override + public String name() { + return in.name(); + } + + @Override + public String type() { + return in.type(); + } + + @Override + public boolean isReadOnly() { + return in.isReadOnly(); + } + + @Override + public long getTotalSpace() throws IOException { + return in.getTotalSpace(); + } + + @Override + public long getUsableSpace() throws IOException { + return in.getUsableSpace(); + } + + @Override + public long getUnallocatedSpace() throws IOException { + return in.getUnallocatedSpace(); + } + + @Override + public boolean supportsFileAttributeView(Class type) { + return in.supportsFileAttributeView(type); + } + + @Override + public boolean supportsFileAttributeView(String name) { + if ("lucene".equals(name)) { + return true; + } else { + return in.supportsFileAttributeView(name); + } + } + + @Override + public V getFileStoreAttributeView(Class type) { + return in.getFileStoreAttributeView(type); + } + + @Override + public Object getAttribute(String attribute) throws IOException { + if ("lucene:spins".equals(attribute)) { + return spins; + } else { + return in.getAttribute(attribute); + } + } + + @Override + public String toString() { + return in.toString(); + } +} diff --git a/src/main/java/org/elasticsearch/env/Environment.java b/src/main/java/org/elasticsearch/env/Environment.java index 87a356774f1..cab04792b5d 100644 --- a/src/main/java/org/elasticsearch/env/Environment.java +++ b/src/main/java/org/elasticsearch/env/Environment.java @@ -20,22 +20,16 @@ package org.elasticsearch.env; import org.elasticsearch.cluster.ClusterName; -import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; -import com.google.common.base.Charsets; - -import java.io.File; import java.io.IOException; -import java.io.InputStreamReader; import java.net.MalformedURLException; -import java.net.URI; -import java.net.URISyntaxException; import java.net.URL; -import java.nio.file.*; -import java.util.Collections; +import java.nio.file.FileStore; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; import static org.elasticsearch.common.Strings.cleanPath; import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS; @@ -49,10 +43,6 @@ public class Environment { private final Path homeFile; - private final Path workFile; - - private final Path workWithClusterFile; - private final Path[] dataFiles; private final Path[] dataWithClusterFiles; @@ -63,6 +53,22 @@ public class Environment { private final Path logsFile; + /** List of filestores on the system */ + private static final FileStore[] fileStores; + + /** + * We have to do this in clinit instead of init, because ES code is pretty messy, + * and makes these environments, throws them away, makes them again, etc. 
+ */ + static { + // gather information about filesystems + ArrayList allStores = new ArrayList<>(); + for (FileStore store : PathUtils.getDefaultFileSystem().getFileStores()) { + allStores.add(new ESFileStore(store)); + } + fileStores = allStores.toArray(new ESFileStore[allStores.size()]); + } + public Environment() { this(EMPTY_SETTINGS); } @@ -87,13 +93,6 @@ public class Environment { pluginsFile = homeFile.resolve("plugins"); } - if (settings.get("path.work") != null) { - workFile = PathUtils.get(cleanPath(settings.get("path.work"))); - } else { - workFile = homeFile.resolve("work"); - } - workWithClusterFile = workFile.resolve(ClusterName.clusterNameFromSettings(settings).value()); - String[] dataPaths = settings.getAsArray("path.data"); if (dataPaths.length > 0) { dataFiles = new Path[dataPaths.length]; @@ -128,26 +127,6 @@ public class Environment { return homeFile; } - /** - * The work location, path to temp files. - * - * Note, currently, we don't use it in ES at all, we should strive to see if we can keep it like that, - * but if we do, we have the infra for it. - */ - public Path workFile() { - return workFile; - } - - /** - * The work location with the cluster name as a sub directory. - * - * Note, currently, we don't use it in ES at all, we should strive to see if we can keep it like that, - * but if we do, we have the infra for it. - */ - public Path workWithClusterFile() { - return workWithClusterFile; - } - /** * The data location. */ @@ -177,6 +156,24 @@ public class Environment { return logsFile; } + /** + * Looks up the filestore associated with a Path. + *
+     * <p>
+     * This is an enhanced version of {@link Files#getFileStore(Path)}:
+     * <ul>
+     *   <li>On *nix systems, the store returned for the root filesystem will contain
+     *       the actual filesystem type (e.g. {@code ext4}) instead of {@code rootfs}.
+     *   <li>On some systems, the custom attribute {@code lucene:spins} is supported
+     *       via the {@link FileStore#getAttribute(String)} method.
+     *   <li>Only requires the security permissions of {@link Files#getFileStore(Path)},
+     *       no permissions to the actual mount point are required.
+     *   <li>Exception handling has the same semantics as {@link Files#getFileStore(Path)}.
+     * </ul>
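+     * <p>
+     * A minimal usage sketch (the path below is illustrative, not a real default),
+     * mirroring what {@code NodePath} does later in this patch:
+     * <pre>
+     * FileStore store = environment.getFileStore(PathUtils.get("/var/data"));
+     * Boolean spins = store.supportsFileAttributeView("lucene")
+     *         ? (Boolean) store.getAttribute("lucene:spins") : null;
+     * </pre>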
+ */ + public FileStore getFileStore(Path path) throws IOException { + return ESFileStore.getMatchingFileStore(path, fileStores); + } + public URL resolveConfig(String path) throws FailedToResolveConfigException { String origPath = path; // first, try it as a path on the file system diff --git a/src/main/java/org/elasticsearch/env/NodeEnvironment.java b/src/main/java/org/elasticsearch/env/NodeEnvironment.java index c2c6755ecdc..134a15e3f7d 100644 --- a/src/main/java/org/elasticsearch/env/NodeEnvironment.java +++ b/src/main/java/org/elasticsearch/env/NodeEnvironment.java @@ -22,10 +22,7 @@ package org.elasticsearch.env; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; import org.apache.lucene.store.*; -import org.apache.lucene.util.Constants; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; @@ -64,23 +61,15 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { * not running on Linux, or we hit an exception trying), True means the device possibly spins and False means it does not. */ public final Boolean spins; - public NodePath(Path path) throws IOException { + public NodePath(Path path, Environment environment) throws IOException { this.path = path; this.indicesPath = path.resolve(INDICES_FOLDER); - this.fileStore = getFileStore(path); - Boolean spins; - - // Lucene's IOUtils.spins only works on Linux today: - if (Constants.LINUX) { - try { - spins = IOUtils.spins(path); - } catch (Exception e) { - spins = null; - } + this.fileStore = environment.getFileStore(path); + if (fileStore.supportsFileAttributeView("lucene")) { + this.spins = (Boolean) fileStore.getAttribute("lucene:spins"); } else { - spins = null; + this.spins = null; } - this.spins = spins; } /** @@ -157,7 +146,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { Lock tmpLock = luceneDir.makeLock(NODE_LOCK_FILENAME); boolean obtained = tmpLock.obtain(); if (obtained) { - nodePaths[dirIndex] = new NodePath(dir); + nodePaths[dirIndex] = new NodePath(dir, environment); locks[dirIndex] = tmpLock; localNodeId = possibleLockId; } else { @@ -181,7 +170,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { } if (locks[0] == null) { - throw new ElasticsearchIllegalStateException("Failed to obtain node lock, is the following location writable?: " + throw new IllegalStateException("Failed to obtain node lock, is the following location writable?: " + Arrays.toString(environment.dataWithClusterFiles()), lastException); } @@ -289,58 +278,6 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { return b.toString(); } - - // TODO: move somewhere more "util"? But, this is somewhat hacky code ... not great to publicize it any more: - - // NOTE: poached from Lucene's IOUtils: - - /** Files.getFileStore(Path) useless here! Don't complain, just try it yourself. 
*/ - private static FileStore getFileStore(Path path) throws IOException { - FileStore store = Files.getFileStore(path); - - try { - String mount = getMountPoint(store); - FileStore sameMountPoint = null; - for (FileStore fs : path.getFileSystem().getFileStores()) { - if (mount.equals(getMountPoint(fs))) { - if (sameMountPoint == null) { - sameMountPoint = fs; - } else { - // more than one filesystem has the same mount point; something is wrong! - // fall back to crappy one we got from Files.getFileStore - return store; - } - } - } - - if (sameMountPoint != null) { - // ok, we found only one, use it: - return sameMountPoint; - } else { - // fall back to crappy one we got from Files.getFileStore - return store; - } - } catch (Exception e) { - // ignore - } - - // fall back to crappy one we got from Files.getFileStore - return store; - } - - // NOTE: poached from Lucene's IOUtils: - - // these are hacks that are not guaranteed - private static String getMountPoint(FileStore store) { - String desc = store.toString(); - int index = desc.lastIndexOf(" ("); - if (index != -1) { - return desc.substring(0, index); - } else { - return desc; - } - } - /** * Deletes a shard data directory iff the shards locks were successfully acquired. * @@ -597,12 +534,12 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { /** * Returns an array of all of the nodes data locations. - * @throws org.elasticsearch.ElasticsearchIllegalStateException if the node is not configured to store local locations + * @throws IllegalStateException if the node is not configured to store local locations */ public Path[] nodeDataPaths() { assert assertEnvIsLocked(); if (nodePaths == null || locks == null) { - throw new ElasticsearchIllegalStateException("node is not configured to store local location"); + throw new IllegalStateException("node is not configured to store local location"); } Path[] paths = new Path[nodePaths.length]; for(int i=0;i findAllIndices() throws IOException { if (nodePaths == null || locks == null) { - throw new ElasticsearchIllegalStateException("node is not configured to store local location"); + throw new IllegalStateException("node is not configured to store local location"); } assert assertEnvIsLocked(); Set indices = Sets.newHashSet(); @@ -704,7 +641,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { /** * This method tries to write an empty file and moves it using an atomic move operation. - * This method throws an {@link ElasticsearchIllegalStateException} if this operation is + * This method throws an {@link IllegalStateException} if this operation is * not supported by the filesystem. This test is executed on each of the data directories. * This method cleans up all files even in the case of an error. 
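     * <p>
     * In outline (identifiers here are illustrative, not the actual ones used):
     * <pre>
     * Path src = dataDir.resolve("tmp"), target = dataDir.resolve("moved");
     * Files.createFile(src);
     * try {
     *     Files.move(src, target, StandardCopyOption.ATOMIC_MOVE);
     * } catch (AtomicMoveNotSupportedException ex) {
     *     throw new IllegalStateException("atomic_move is not supported ...", ex);
     * }
     * </pre>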
*/ @@ -718,7 +655,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { try { Files.move(src, target, StandardCopyOption.ATOMIC_MOVE); } catch (AtomicMoveNotSupportedException ex) { - throw new ElasticsearchIllegalStateException("atomic_move is not supported by the filesystem on path [" + throw new IllegalStateException("atomic_move is not supported by the filesystem on path [" + nodePath.path + "] atomic_move is required for elasticsearch to work correctly.", ex); } finally { @@ -764,7 +701,7 @@ public class NodeEnvironment extends AbstractComponent implements Closeable { return PathUtils.get(customDataDir); } } else { - throw new ElasticsearchIllegalArgumentException("no custom " + IndexMetaData.SETTING_DATA_PATH + " setting available"); + throw new IllegalArgumentException("no custom " + IndexMetaData.SETTING_DATA_PATH + " setting available"); } } diff --git a/src/main/java/org/elasticsearch/gateway/Gateway.java b/src/main/java/org/elasticsearch/gateway/Gateway.java index cd15bccdc4a..139b5763489 100644 --- a/src/main/java/org/elasticsearch/gateway/Gateway.java +++ b/src/main/java/org/elasticsearch/gateway/Gateway.java @@ -31,7 +31,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.indices.IndicesService; + import java.nio.file.Path; diff --git a/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index 34503b08ad8..158a3df5d91 100644 --- a/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -20,7 +20,6 @@ package org.elasticsearch.gateway; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; @@ -175,7 +174,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL } final String name = stateFile.getFileName().toString(); if (name.startsWith("metadata-")) { - throw new ElasticsearchIllegalStateException("Detected pre 0.19 metadata file please upgrade to a version before " + throw new IllegalStateException("Detected pre 0.19 metadata file please upgrade to a version before " + Version.CURRENT.minimumCompatibilityVersion() + " first to upgrade state structures - metadata found: [" + stateFile.getParent().toAbsolutePath()); } @@ -225,7 +224,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL } else if (indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0)) { if (indexMetaData.getSettings().get(IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION) != null || indexMetaData.getSettings().get(IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE) != null) { - throw new ElasticsearchIllegalStateException("Indices created on or after 2.0 should NOT contain [" + IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION + throw new IllegalStateException("Indices created on or after 2.0 should NOT contain [" + IndexMetaData.SETTING_LEGACY_ROUTING_HASH_FUNCTION + "] + or [" + IndexMetaData.SETTING_LEGACY_ROUTING_USE_TYPE + "] in their index settings"); } } @@ -244,7 +243,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL if (Files.exists(stateLocation)) { try 
(DirectoryStream stream = Files.newDirectoryStream(stateLocation, "shards-*")) { for (Path stateFile : stream) { - throw new ElasticsearchIllegalStateException("Detected pre 0.19 shard state file please upgrade to a version before " + throw new IllegalStateException("Detected pre 0.19 shard state file please upgrade to a version before " + Version.CURRENT.minimumCompatibilityVersion() + " first to upgrade state structures - shard state found: [" + stateFile.getParent().toAbsolutePath()); } diff --git a/src/main/java/org/elasticsearch/gateway/GatewayService.java b/src/main/java/org/elasticsearch/gateway/GatewayService.java index add9ea49f9c..da0b2bcabf8 100644 --- a/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -102,7 +102,7 @@ public class GatewayService extends AbstractLifecycleComponent i } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { clusterService.addLast(this); // if we received initial state, see if we can recover within the start phase, so we hold the // node from starting until we recovered properly @@ -117,12 +117,12 @@ public class GatewayService extends AbstractLifecycleComponent i } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { clusterService.remove(this); } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { } @Override diff --git a/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index 622652d1c56..5538ef6d043 100644 --- a/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -60,7 +60,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent { this.transportService = transportService; this.clusterService = clusterService; this.allocationService = allocationService; - transportService.registerHandler(ACTION_NAME, new AllocateDangledRequestHandler()); + transportService.registerRequestHandler(ACTION_NAME, AllocateDangledRequest.class, ThreadPool.Names.SAME, new AllocateDangledRequestHandler()); } public void allocateDangled(Collection indices, final Listener listener) { @@ -100,13 +100,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent { void onFailure(Throwable e); } - class AllocateDangledRequestHandler extends BaseTransportRequestHandler { - - @Override - public AllocateDangledRequest newInstance() { - return new AllocateDangledRequest(); - } - + class AllocateDangledRequestHandler implements TransportRequestHandler { @Override public void messageReceived(final AllocateDangledRequest request, final TransportChannel channel) throws Exception { String[] indexNames = new String[request.indices.length]; @@ -173,11 +167,6 @@ public class LocalAllocateDangledIndices extends AbstractComponent { } }); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } static class AllocateDangledRequest extends TransportRequest { @@ -209,7 +198,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent { fromNode.writeTo(out); out.writeVInt(indices.length); for (IndexMetaData indexMetaData : indices) { - IndexMetaData.Builder.writeTo(indexMetaData, out); + indexMetaData.writeTo(out); } } } diff --git a/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java 
b/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java index 8bfbc5a4e06..9d88d84f64a 100644 --- a/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java +++ b/src/main/java/org/elasticsearch/gateway/MetaDataStateFormat.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.gateway; +import com.google.common.base.Preconditions; import com.google.common.base.Predicate; import com.google.common.collect.Collections2; import org.apache.lucene.codecs.CodecUtil; @@ -26,9 +27,7 @@ import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.*; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.lucene.store.InputStreamIndexInput; import org.elasticsearch.common.xcontent.*; @@ -300,7 +299,7 @@ public abstract class MetaDataStateFormat { ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptions); if (files.size() > 0) { // We have some state files but none of them gave us a usable state - throw new ElasticsearchIllegalStateException("Could not find a state file to recover from among " + files); + throw new IllegalStateException("Could not find a state file to recover from among " + files); } return state; } diff --git a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java index 5f5c73873e7..900a2e7ffc7 100644 --- a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java +++ b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java @@ -53,7 +53,8 @@ public class TransportNodesListGatewayMetaState extends TransportNodesOperationA @Inject public TransportNodesListGatewayMetaState(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters) { - super(settings, ACTION_NAME, clusterName, threadPool, clusterService, transportService, actionFilters); + super(settings, ACTION_NAME, clusterName, threadPool, clusterService, transportService, actionFilters, + Request.class, NodeRequest.class, ThreadPool.Names.GENERIC); } TransportNodesListGatewayMetaState init(GatewayMetaState metaState) { @@ -65,26 +66,11 @@ public class TransportNodesListGatewayMetaState extends TransportNodesOperationA return execute(new Request(nodesIds).timeout(timeout)); } - @Override - protected String executor() { - return ThreadPool.Names.GENERIC; - } - @Override protected boolean transportCompress() { return true; // compress since the metadata can become large } - @Override - protected Request newRequestInstance() { - return new Request(); - } - - @Override - protected NodeRequest newNodeRequest() { - return new NodeRequest(); - } - @Override protected NodeRequest newNodeRequest(String nodeId, Request request) { return new NodeRequest(nodeId, request); @@ -114,7 +100,7 @@ public class TransportNodesListGatewayMetaState extends TransportNodesOperationA } @Override - protected NodeGatewayMetaState nodeOperation(NodeRequest request) throws ElasticsearchException { + protected NodeGatewayMetaState nodeOperation(NodeRequest request) { try { return new NodeGatewayMetaState(clusterService.localNode(), metaState.loadMetaState()); } catch (Exception e) { @@ -235,7 
+221,7 @@ public class TransportNodesListGatewayMetaState extends TransportNodesOperationA out.writeBoolean(false); } else { out.writeBoolean(true); - MetaData.Builder.writeTo(metaData, out); + metaData.writeTo(out); } } } diff --git a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index 87edb9cc3c1..04b606cb09a 100644 --- a/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -55,7 +55,8 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesOperat @Inject public TransportNodesListGatewayStartedShards(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService, ActionFilters actionFilters, NodeEnvironment env) { - super(settings, ACTION_NAME, clusterName, threadPool, clusterService, transportService, actionFilters); + super(settings, ACTION_NAME, clusterName, threadPool, clusterService, transportService, actionFilters, + Request.class, NodeRequest.class, ThreadPool.Names.GENERIC); this.nodeEnv = env; } @@ -63,26 +64,11 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesOperat return execute(new Request(shardId, indexUUID, nodesIds).timeout(timeout)); } - @Override - protected String executor() { - return ThreadPool.Names.GENERIC; - } - @Override protected boolean transportCompress() { return true; // this can become big... } - @Override - protected Request newRequestInstance() { - return new Request(); - } - - @Override - protected NodeRequest newNodeRequest() { - return new NodeRequest(); - } - @Override protected NodeRequest newNodeRequest(String nodeId, Request request) { return new NodeRequest(nodeId, request); @@ -112,7 +98,7 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesOperat } @Override - protected NodeGatewayStartedShards nodeOperation(NodeRequest request) throws ElasticsearchException { + protected NodeGatewayStartedShards nodeOperation(NodeRequest request) { try { final ShardId shardId = request.getShardId(); final String indexUUID = request.getIndexUUID(); diff --git a/src/main/java/org/elasticsearch/http/HttpServer.java b/src/main/java/org/elasticsearch/http/HttpServer.java index 6d43053e408..a055196b54b 100644 --- a/src/main/java/org/elasticsearch/http/HttpServer.java +++ b/src/main/java/org/elasticsearch/http/HttpServer.java @@ -88,7 +88,7 @@ public class HttpServer extends AbstractLifecycleComponent { } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { transport.start(); if (logger.isInfoEnabled()) { logger.info("{}", transport.boundAddress()); @@ -97,13 +97,13 @@ public class HttpServer extends AbstractLifecycleComponent { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { nodeService.removeAttribute("http_address"); transport.stop(); } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { transport.close(); } @@ -177,11 +177,13 @@ public class HttpServer extends AbstractLifecycleComponent { sitePath = sitePath.replace("/", separator); // this is a plugin provided site, serve it as static files from the plugin location Path file = FileSystemUtils.append(siteFile, PathUtils.get(sitePath), 0); - if (!Files.exists(file) || Files.isHidden(file)) { + 
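+ // normalize() in the check below collapses any "../" segments first, so a crafted request path cannot escape the plugin's site directory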
+ // return not found instead of forbidden to prevent malicious requests to find out if files exist or dont exist + if (!Files.exists(file) || Files.isHidden(file) || !file.toAbsolutePath().normalize().startsWith(siteFile.toAbsolutePath())) { channel.sendResponse(new BytesRestResponse(NOT_FOUND)); return; } - + BasicFileAttributes attributes = Files.readAttributes(file, BasicFileAttributes.class); if (!attributes.isRegularFile()) { // If it's not a dir, we send a 403 @@ -196,10 +198,7 @@ public class HttpServer extends AbstractLifecycleComponent { return; } } - if (!file.toAbsolutePath().startsWith(siteFile.toAbsolutePath())) { - channel.sendResponse(new BytesRestResponse(FORBIDDEN)); - return; - } + try { byte[] data = Files.readAllBytes(file); channel.sendResponse(new BytesRestResponse(OK, guessMimeType(sitePath), data)); diff --git a/src/main/java/org/elasticsearch/http/HttpServerModule.java b/src/main/java/org/elasticsearch/http/HttpServerModule.java index e197c3afbfd..df0be6bcf69 100644 --- a/src/main/java/org/elasticsearch/http/HttpServerModule.java +++ b/src/main/java/org/elasticsearch/http/HttpServerModule.java @@ -19,14 +19,12 @@ package org.elasticsearch.http; +import com.google.common.base.Preconditions; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.http.netty.NettyHttpServerTransport; -import org.elasticsearch.plugins.Plugin; - -import static org.elasticsearch.common.Preconditions.checkNotNull; /** * @@ -60,8 +58,8 @@ public class HttpServerModule extends AbstractModule { } public void setHttpServerTransport(Class httpServerTransport, String source) { - checkNotNull(httpServerTransport, "Configured http server transport may not be null"); - checkNotNull(source, "Plugin, that changes transport may not be null"); + Preconditions.checkNotNull(httpServerTransport, "Configured http server transport may not be null"); + Preconditions.checkNotNull(source, "Plugin, that changes transport may not be null"); this.configuredHttpServerTransport = httpServerTransport; this.configuredHttpServerTransportSource = source; } diff --git a/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java b/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java index 693e5049f58..a727d3fed0f 100644 --- a/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java +++ b/src/main/java/org/elasticsearch/http/netty/NettyHttpServerTransport.java @@ -209,7 +209,7 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent getWordList(Environment env, Settings settings, String settingPrefix) { @@ -237,7 +232,7 @@ public class Analysis { return loadWordList(reader, "#"); } catch (IOException ioe) { String message = String.format(Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix, ioe.getMessage()); - throw new ElasticsearchIllegalArgumentException(message); + throw new IllegalArgumentException(message); } } @@ -268,7 +263,7 @@ public class Analysis { /** * @return null If no settings set for "settingsPrefix" then return null. - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException + * @throws IllegalArgumentException * If the Reader can not be instantiated. 
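     * <p>
     * For example, the synonym filter later in this patch reads its rules file via:
     * <pre>
     * Reader rulesReader = Analysis.getReaderFromFile(env, settings, "synonyms_path");
     * </pre>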
*/ public static Reader getReaderFromFile(Environment env, Settings settings, String settingPrefix) { @@ -284,7 +279,7 @@ public class Analysis { return FileSystemUtils.newBufferedReader(fileUrl, Charsets.UTF_8); } catch (IOException ioe) { String message = String.format(Locale.ROOT, "IOException while reading %s_path: %s", settingPrefix, ioe.getMessage()); - throw new ElasticsearchIllegalArgumentException(message); + throw new IllegalArgumentException(message); } } diff --git a/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java b/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java index 0bd527a2c22..3b351567456 100644 --- a/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java +++ b/src/main/java/org/elasticsearch/index/analysis/AnalysisModule.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Scopes; @@ -186,12 +185,12 @@ public class AnalysisModule extends AbstractModule { } } if (type == null) { - throw new ElasticsearchIllegalArgumentException("failed to find char filter type [" + charFilterSettings.get("type") + "] for [" + charFilterName + "]", e); + throw new IllegalArgumentException("failed to find char filter type [" + charFilterSettings.get("type") + "] for [" + charFilterName + "]", e); } } if (type == null) { // nothing found, see if its in bindings as a binding name - throw new ElasticsearchIllegalArgumentException("Char Filter [" + charFilterName + "] must have a type associated with it"); + throw new IllegalArgumentException("Char Filter [" + charFilterName + "] must have a type associated with it"); } charFilterBinder.addBinding(charFilterName).toProvider(FactoryProvider.newFactory(CharFilterFactoryFactory.class, type)).in(Scopes.SINGLETON); } @@ -246,11 +245,11 @@ public class AnalysisModule extends AbstractModule { } } if (type == null) { - throw new ElasticsearchIllegalArgumentException("failed to find token filter type [" + tokenFilterSettings.get("type") + "] for [" + tokenFilterName + "]", e); + throw new IllegalArgumentException("failed to find token filter type [" + tokenFilterSettings.get("type") + "] for [" + tokenFilterName + "]", e); } } if (type == null) { - throw new ElasticsearchIllegalArgumentException("token filter [" + tokenFilterName + "] must have a type associated with it"); + throw new IllegalArgumentException("token filter [" + tokenFilterName + "] must have a type associated with it"); } tokenFilterBinder.addBinding(tokenFilterName).toProvider(FactoryProvider.newFactory(TokenFilterFactoryFactory.class, type)).in(Scopes.SINGLETON); } @@ -305,11 +304,11 @@ public class AnalysisModule extends AbstractModule { } } if (type == null) { - throw new ElasticsearchIllegalArgumentException("failed to find tokenizer type [" + tokenizerSettings.get("type") + "] for [" + tokenizerName + "]", e); + throw new IllegalArgumentException("failed to find tokenizer type [" + tokenizerSettings.get("type") + "] for [" + tokenizerName + "]", e); } } if (type == null) { - throw new ElasticsearchIllegalArgumentException("token filter [" + tokenizerName + "] must have a type associated with it"); + throw new IllegalArgumentException("token filter [" + tokenizerName + "] must have a type associated with it"); } 
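// the resolved type is registered as an assisted-inject provider: the @Assisted name/settings are supplied when the tokenizer factory is created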
tokenizerBinder.addBinding(tokenizerName).toProvider(FactoryProvider.newFactory(TokenizerFactoryFactory.class, type)).in(Scopes.SINGLETON); } @@ -369,7 +368,7 @@ public class AnalysisModule extends AbstractModule { // we have a tokenizer, use the CustomAnalyzer type = CustomAnalyzerProvider.class; } else { - throw new ElasticsearchIllegalArgumentException("failed to find analyzer type [" + analyzerSettings.get("type") + "] or tokenizer for [" + analyzerName + "]", e); + throw new IllegalArgumentException("failed to find analyzer type [" + analyzerSettings.get("type") + "] or tokenizer for [" + analyzerName + "]", e); } } } @@ -380,7 +379,7 @@ public class AnalysisModule extends AbstractModule { // we have a tokenizer, use the CustomAnalyzer type = CustomAnalyzerProvider.class; } else { - throw new ElasticsearchIllegalArgumentException("failed to find analyzer type [" + analyzerSettings.get("type") + "] or tokenizer for [" + analyzerName + "]"); + throw new IllegalArgumentException("failed to find analyzer type [" + analyzerSettings.get("type") + "] or tokenizer for [" + analyzerName + "]"); } } analyzerBinder.addBinding(analyzerName).toProvider(FactoryProvider.newFactory(AnalyzerProviderFactory.class, type)).in(Scopes.SINGLETON); diff --git a/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java b/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java index 3fe8cbd27e5..dbbe01ae38f 100644 --- a/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java +++ b/src/main/java/org/elasticsearch/index/analysis/AnalysisService.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis; import com.google.common.collect.ImmutableMap; import org.apache.lucene.analysis.Analyzer; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; @@ -222,7 +221,7 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable } Analyzer analyzerF = analyzerFactory.get(); if (analyzerF == null) { - throw new ElasticsearchIllegalArgumentException("analyzer [" + analyzerFactory.name() + "] created null analyzer"); + throw new IllegalArgumentException("analyzer [" + analyzerFactory.name() + "] created null analyzer"); } NamedAnalyzer analyzer; // if we got a named analyzer back, use it... @@ -247,7 +246,7 @@ public class AnalysisService extends AbstractIndexComponent implements Closeable defaultAnalyzer = analyzers.get("default"); if (defaultAnalyzer == null) { - throw new ElasticsearchIllegalArgumentException("no default analyzer configured"); + throw new IllegalArgumentException("no default analyzer configured"); } defaultIndexAnalyzer = analyzers.containsKey("default_index") ? analyzers.get("default_index") : analyzers.get("default"); defaultSearchAnalyzer = analyzers.containsKey("default_search") ? 
analyzers.get("default_search") : analyzers.get("default"); diff --git a/src/main/java/org/elasticsearch/index/analysis/ChineseAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/ChineseAnalyzerProvider.java index fe56627554f..93a3e0f21bf 100644 --- a/src/main/java/org/elasticsearch/index/analysis/ChineseAnalyzerProvider.java +++ b/src/main/java/org/elasticsearch/index/analysis/ChineseAnalyzerProvider.java @@ -19,9 +19,7 @@ package org.elasticsearch.index.analysis; -import org.apache.lucene.util.Version; import org.apache.lucene.analysis.standard.StandardAnalyzer; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; diff --git a/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java index 3215bec1d23..5520a3c0e64 100644 --- a/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/CommonGramsTokenFilterFactory.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.index.Index; import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.ElasticsearchIllegalArgumentException; /** * @@ -51,7 +50,7 @@ public class CommonGramsTokenFilterFactory extends AbstractTokenFilterFactory { this.words = Analysis.parseCommonWords(env, settings, null, ignoreCase); if (this.words == null) { - throw new ElasticsearchIllegalArgumentException("mising or empty [common_words] or [common_words_path] configuration for common_grams token filter"); + throw new IllegalArgumentException("mising or empty [common_words] or [common_words_path] configuration for common_grams token filter"); } } diff --git a/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java index 6b14aad567e..c98308d1ab1 100644 --- a/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/EdgeNGramTokenizerFactory.java @@ -24,16 +24,13 @@ import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer; import org.apache.lucene.analysis.ngram.Lucene43EdgeNGramTokenizer; import org.apache.lucene.analysis.ngram.NGramTokenizer; import org.apache.lucene.util.Version; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.cluster.metadata.IndexMetaData; + import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import org.elasticsearch.index.settings.IndexSettings; -import java.io.Reader; - import static org.elasticsearch.index.analysis.NGramTokenizerFactory.parseTokenChars; /** @@ -71,7 +68,7 @@ public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory { * Yet if somebody uses 0.90.2 or higher with a prev. lucene version we should also use the deprecated version. */ if (side == Lucene43EdgeNGramTokenizer.Side.BACK) { - throw new ElasticsearchIllegalArgumentException("side=back is not supported anymore. Please fix your analysis chain or use" + throw new IllegalArgumentException("side=back is not supported anymore. 
Please fix your analysis chain or use" + " an older compatibility version (<=4.2) but beware that it might cause highlighting bugs." + " To obtain the same behavior as the previous version please use \"edgeNGram\" filter which still supports side=back" + " in combination with a \"keyword\" tokenizer"); @@ -91,4 +88,4 @@ public class EdgeNGramTokenizerFactory extends AbstractTokenizerFactory { return new Lucene43EdgeNGramTokenizer(side, minGram, maxGram); } } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactory.java index 6bdba6617f4..4281f959c16 100644 --- a/src/main/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/HunspellTokenFilterFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.hunspell.Dictionary; import org.apache.lucene.analysis.hunspell.HunspellStemFilter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -44,12 +43,12 @@ public class HunspellTokenFilterFactory extends AbstractTokenFilterFactory { String locale = settings.get("locale", settings.get("language", settings.get("lang", null))); if (locale == null) { - throw new ElasticsearchIllegalArgumentException("missing [locale | language | lang] configuration for hunspell token filter"); + throw new IllegalArgumentException("missing [locale | language | lang] configuration for hunspell token filter"); } dictionary = hunspellService.getDictionary(locale); if (dictionary == null) { - throw new ElasticsearchIllegalArgumentException(String.format(Locale.ROOT, "Unknown hunspell dictionary for locale [%s]", locale)); + throw new IllegalArgumentException(String.format(Locale.ROOT, "Unknown hunspell dictionary for locale [%s]", locale)); } dedup = settings.getAsBoolean("dedup", true); diff --git a/src/main/java/org/elasticsearch/index/analysis/KeepTypesFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/KeepTypesFilterFactory.java index 47e03fdcbf0..6b8e81f4953 100644 --- a/src/main/java/org/elasticsearch/index/analysis/KeepTypesFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/KeepTypesFilterFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.TypeTokenFilter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -56,7 +55,7 @@ public class KeepTypesFilterFactory extends AbstractTokenFilterFactory { final String[] arrayKeepTypes = settings.getAsArray(KEEP_TYPES_KEY, null); if ((arrayKeepTypes == null)) { - throw new ElasticsearchIllegalArgumentException("keep_types requires `" + KEEP_TYPES_KEY + "` to be configured"); + throw new IllegalArgumentException("keep_types requires `" + KEEP_TYPES_KEY + "` to be configured"); } this.keepTypes = new HashSet<>(Arrays.asList(arrayKeepTypes)); diff --git a/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java 
b/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java index e2605d8ea23..a92ade2467b 100644 --- a/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/KeepWordFilterFactory.java @@ -24,7 +24,6 @@ import org.apache.lucene.analysis.miscellaneous.KeepWordFilter; import org.apache.lucene.analysis.miscellaneous.Lucene43KeepWordFilter; import org.apache.lucene.analysis.util.CharArraySet; import org.apache.lucene.util.Version; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -75,11 +74,11 @@ public class KeepWordFilterFactory extends AbstractTokenFilterFactory { final String keepWordsPath = settings.get(KEEP_WORDS_PATH_KEY, null); if ((arrayKeepWords == null && keepWordsPath == null) || (arrayKeepWords != null && keepWordsPath != null)) { // we don't allow both or none - throw new ElasticsearchIllegalArgumentException("keep requires either `" + KEEP_WORDS_KEY + "` or `" + throw new IllegalArgumentException("keep requires either `" + KEEP_WORDS_KEY + "` or `" + KEEP_WORDS_PATH_KEY + "` to be configured"); } if (version.onOrAfter(Version.LUCENE_4_4) && settings.get(ENABLE_POS_INC_KEY) != null) { - throw new ElasticsearchIllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use" + throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use" + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs."); } enablePositionIncrements = version.onOrAfter(Version.LUCENE_4_4) ? 
true : settings.getAsBoolean(ENABLE_POS_INC_KEY, true); diff --git a/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java index 73f41cdc305..bc867218741 100644 --- a/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/KeywordMarkerTokenFilterFactory.java @@ -24,7 +24,6 @@ import java.util.Set; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter; import org.apache.lucene.analysis.util.CharArraySet; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -44,7 +43,7 @@ public class KeywordMarkerTokenFilterFactory extends AbstractTokenFilterFactory boolean ignoreCase = settings.getAsBoolean("ignore_case", false); Set rules = Analysis.getWordSet(env, settings, "keywords"); if (rules == null) { - throw new ElasticsearchIllegalArgumentException("keyword filter requires either `keywords` or `keywords_path` to be configured"); + throw new IllegalArgumentException("keyword filter requires either `keywords` or `keywords_path` to be configured"); } keywordLookup = new CharArraySet(rules, ignoreCase); } diff --git a/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java index aa9868a4fef..22862ff82f9 100644 --- a/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/LengthTokenFilterFactory.java @@ -23,7 +23,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.LengthFilter; import org.apache.lucene.analysis.miscellaneous.Lucene43LengthFilter; import org.apache.lucene.util.Version; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -46,7 +45,7 @@ public class LengthTokenFilterFactory extends AbstractTokenFilterFactory { min = settings.getAsInt("min", 0); max = settings.getAsInt("max", Integer.MAX_VALUE); if (version.onOrAfter(Version.LUCENE_4_4) && settings.get(ENABLE_POS_INC_KEY) != null) { - throw new ElasticsearchIllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use" + throw new IllegalArgumentException(ENABLE_POS_INC_KEY + " is not supported anymore. Please fix your analysis chain or use" + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs."); } enablePositionIncrements = version.onOrAfter(Version.LUCENE_4_4) ? 
true : settings.getAsBoolean(ENABLE_POS_INC_KEY, true); diff --git a/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java index 8c08b82a7bd..eb99bef6e63 100644 --- a/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/LowerCaseTokenFilterFactory.java @@ -24,7 +24,6 @@ import org.apache.lucene.analysis.core.LowerCaseFilter; import org.apache.lucene.analysis.el.GreekLowerCaseFilter; import org.apache.lucene.analysis.ga.IrishLowerCaseFilter; import org.apache.lucene.analysis.tr.TurkishLowerCaseFilter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -61,7 +60,7 @@ public class LowerCaseTokenFilterFactory extends AbstractTokenFilterFactory { } else if (lang.equalsIgnoreCase("turkish")) { return new TurkishLowerCaseFilter(tokenStream); } else { - throw new ElasticsearchIllegalArgumentException("language [" + lang + "] not support for lower case"); + throw new IllegalArgumentException("language [" + lang + "] not support for lower case"); } } } diff --git a/src/main/java/org/elasticsearch/index/analysis/MappingCharFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/MappingCharFilterFactory.java index 3b298e65e56..3bacfe4dff4 100644 --- a/src/main/java/org/elasticsearch/index/analysis/MappingCharFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/MappingCharFilterFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.charfilter.MappingCharFilter; import org.apache.lucene.analysis.charfilter.NormalizeCharMap; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -45,7 +44,7 @@ public class MappingCharFilterFactory extends AbstractCharFilterFactory { List rules = Analysis.getWordList(env, settings, "mappings"); if (rules == null) { - throw new ElasticsearchIllegalArgumentException("mapping requires either `mappings` or `mappings_path` to be configured"); + throw new IllegalArgumentException("mapping requires either `mappings` or `mappings_path` to be configured"); } NormalizeCharMap.Builder normMapBuilder = new NormalizeCharMap.Builder(); diff --git a/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java index 99da029a98e..b884095ae74 100644 --- a/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/NGramTokenizerFactory.java @@ -24,15 +24,13 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.ngram.Lucene43NGramTokenizer; import org.apache.lucene.analysis.ngram.NGramTokenizer; import org.apache.lucene.util.Version; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.cluster.metadata.IndexMetaData; + import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; import 
org.elasticsearch.index.settings.IndexSettings; -import java.io.Reader; import java.lang.reflect.Field; import java.lang.reflect.Modifier; import java.util.Locale; @@ -83,7 +81,7 @@ public class NGramTokenizerFactory extends AbstractTokenizerFactory { characterClass = characterClass.toLowerCase(Locale.ROOT).trim(); CharMatcher matcher = MATCHERS.get(characterClass); if (matcher == null) { - throw new ElasticsearchIllegalArgumentException("Unknown token type: '" + characterClass + "', must be one of " + MATCHERS.keySet()); + throw new IllegalArgumentException("Unknown token type: '" + characterClass + "', must be one of " + MATCHERS.keySet()); } builder.or(matcher); } @@ -123,4 +121,4 @@ public class NGramTokenizerFactory extends AbstractTokenizerFactory { } } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactory.java b/src/main/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactory.java index fb1fda8ac9d..e1b58b3e376 100644 --- a/src/main/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/PathHierarchyTokenizerFactory.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.path.PathHierarchyTokenizer; import org.apache.lucene.analysis.path.ReversePathHierarchyTokenizer; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -46,7 +45,7 @@ public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory { if (delimiter == null) { this.delimiter = PathHierarchyTokenizer.DEFAULT_DELIMITER; } else if (delimiter.length() > 1) { - throw new ElasticsearchIllegalArgumentException("delimiter can only be a one char value"); + throw new IllegalArgumentException("delimiter can only be a one char value"); } else { this.delimiter = delimiter.charAt(0); } @@ -55,7 +54,7 @@ public class PathHierarchyTokenizerFactory extends AbstractTokenizerFactory { if (replacement == null) { this.replacement = this.delimiter; } else if (replacement.length() > 1) { - throw new ElasticsearchIllegalArgumentException("replacement can only be a one char value"); + throw new IllegalArgumentException("replacement can only be a one char value"); } else { this.replacement = replacement.charAt(0); } diff --git a/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java b/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java index 1996aff8c92..e6205596316 100644 --- a/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java +++ b/src/main/java/org/elasticsearch/index/analysis/PatternAnalyzerProvider.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.core.StopAnalyzer; import org.apache.lucene.analysis.util.CharArraySet; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; @@ -57,7 +56,7 @@ public class PatternAnalyzerProvider extends AbstractIndexAnalyzerProvider rules = Analysis.getWordList(env, settings, "rules"); if (rules == null) { - throw new ElasticsearchIllegalArgumentException("stemmer override 
filter requires either `rules` or `rules_path` to be configured"); + throw new IllegalArgumentException("stemmer override filter requires either `rules` or `rules_path` to be configured"); } StemmerOverrideFilter.Builder builder = new StemmerOverrideFilter.Builder(false); diff --git a/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java index 745044d0019..2f2abd9aa1e 100644 --- a/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/StopTokenFilterFactory.java @@ -26,7 +26,6 @@ import org.apache.lucene.analysis.core.StopFilter; import org.apache.lucene.analysis.util.CharArraySet; import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter; import org.apache.lucene.util.Version; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -56,7 +55,7 @@ public class StopTokenFilterFactory extends AbstractTokenFilterFactory { this.removeTrailing = settings.getAsBoolean("remove_trailing", true); this.stopWords = Analysis.parseStopWords(env, settings, StopAnalyzer.ENGLISH_STOP_WORDS_SET, ignoreCase); if (version.onOrAfter(Version.LUCENE_4_4) && settings.get("enable_position_increments") != null) { - throw new ElasticsearchIllegalArgumentException("enable_position_increments is not supported anymore as of Lucene 4.4 as it can create broken token streams." + throw new IllegalArgumentException("enable_position_increments is not supported anymore as of Lucene 4.4 as it can create broken token streams." + " Please fix your analysis chain or use an older compatibility version (<= 4.3)."); } this.enablePositionIncrements = settings.getAsBoolean("enable_position_increments", true); diff --git a/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java index 87ee3e9adf2..646e3d9b6ea 100644 --- a/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/SynonymTokenFilterFactory.java @@ -28,11 +28,9 @@ import org.apache.lucene.analysis.synonym.SolrSynonymParser; import org.apache.lucene.analysis.synonym.SynonymFilter; import org.apache.lucene.analysis.synonym.SynonymMap; import org.apache.lucene.analysis.synonym.WordnetSynonymParser; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.io.FastStringReader; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -66,7 +64,7 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { } else if (settings.get("synonyms_path") != null) { rulesReader = Analysis.getReaderFromFile(env, settings, "synonyms_path"); } else { - throw new ElasticsearchIllegalArgumentException("synonym requires either `synonyms` or `synonyms_path` to be configured"); + throw new IllegalArgumentException("synonym requires either `synonyms` or `synonyms_path` to be configured"); } this.ignoreCase = settings.getAsBoolean("ignore_case", false); @@ -79,7 +77,7 
@@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { tokenizerFactoryFactory = indicesAnalysisService.tokenizerFactoryFactory(tokenizerName); } if (tokenizerFactoryFactory == null) { - throw new ElasticsearchIllegalArgumentException("failed to find tokenizer [" + tokenizerName + "] for synonym token filter"); + throw new IllegalArgumentException("failed to find tokenizer [" + tokenizerName + "] for synonym token filter"); } final TokenizerFactory tokenizerFactory = tokenizerFactoryFactory.create(tokenizerName, ImmutableSettings.builder().put(indexSettings).put(settings).build()); @@ -106,7 +104,7 @@ public class SynonymTokenFilterFactory extends AbstractTokenFilterFactory { synonymMap = parser.build(); } catch (Exception e) { - throw new ElasticsearchIllegalArgumentException("failed to build synonyms", e); + throw new IllegalArgumentException("failed to build synonyms", e); } } diff --git a/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java index a3d0d4b0fb5..9e54e6f16ee 100644 --- a/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/TrimTokenFilterFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.miscellaneous.Lucene43TrimFilter; import org.apache.lucene.util.Version; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.TrimFilter; @@ -44,7 +43,7 @@ public class TrimTokenFilterFactory extends AbstractTokenFilterFactory { public TrimTokenFilterFactory(Index index, @IndexSettings Settings indexSettings, Environment env, @Assisted String name, @Assisted Settings settings) { super(index, indexSettings, name, settings); if (version.onOrAfter(Version.LUCENE_4_4_0) && settings.get(UPDATE_OFFSETS_KEY) != null) { - throw new ElasticsearchIllegalArgumentException(UPDATE_OFFSETS_KEY + " is not supported anymore. Please fix your analysis chain or use" + throw new IllegalArgumentException(UPDATE_OFFSETS_KEY + " is not supported anymore. 
Please fix your analysis chain or use" + " an older compatibility version (<=4.3) but beware that it might cause highlighting bugs."); } this.updateOffsets = settings.getAsBoolean("update_offsets", false); diff --git a/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java index 69a37bc0a6b..8e4db15cd7d 100644 --- a/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/TruncateTokenFilterFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis; import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.TruncateTokenFilter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -42,7 +41,7 @@ public class TruncateTokenFilterFactory extends AbstractTokenFilterFactory { super(index, indexSettings, name, settings); this.length = settings.getAsInt("length", -1); if (length <= 0) { - throw new ElasticsearchIllegalArgumentException("length parameter must be provided"); + throw new IllegalArgumentException("length parameter must be provided"); } } diff --git a/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java index 0d5ceb77a99..b9c296df22b 100644 --- a/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/compound/AbstractCompoundWordTokenFilterFactory.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis.compound; import org.apache.lucene.analysis.compound.CompoundWordTokenFilterBase; import org.apache.lucene.analysis.util.CharArraySet; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; import org.elasticsearch.common.settings.Settings; @@ -52,7 +51,7 @@ public abstract class AbstractCompoundWordTokenFilterFactory extends AbstractTok onlyLongestMatch = settings.getAsBoolean("only_longest_match", false); wordList = Analysis.getWordSet(env, settings, "word_list"); if (wordList == null) { - throw new ElasticsearchIllegalArgumentException("word_list must be provided for [" + name + "], either as a path to a file, or directly"); + throw new IllegalArgumentException("word_list must be provided for [" + name + "], either as a path to a file, or directly"); } } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java index acea0c2abc8..4224c9cc15d 100644 --- a/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java +++ b/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java @@ -25,7 +25,6 @@ import org.apache.lucene.analysis.compound.Lucene43HyphenationCompoundWordTokenF import org.apache.lucene.analysis.compound.hyphenation.HyphenationTree; import org.apache.lucene.util.Version; -import 
diff --git a/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java b/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java
index acea0c2abc8..4224c9cc15d 100644
--- a/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java
+++ b/src/main/java/org/elasticsearch/index/analysis/compound/HyphenationCompoundWordTokenFilterFactory.java
@@ -25,7 +25,6 @@ import org.apache.lucene.analysis.compound.Lucene43HyphenationCompoundWordTokenF
 import org.apache.lucene.analysis.compound.hyphenation.HyphenationTree;
 import org.apache.lucene.util.Version;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.inject.assistedinject.Assisted;
 import org.elasticsearch.common.settings.Settings;
@@ -36,8 +35,6 @@ import org.elasticsearch.index.settings.IndexSettings;
 import org.xml.sax.InputSource;
 
 import java.net.URL;
-import java.nio.file.Files;
-import java.nio.file.Path;
 
 /**
  * Uses the {@link org.apache.lucene.analysis.compound.HyphenationCompoundWordTokenFilter} to decompound tokens based on hyphenation rules.
@@ -55,7 +52,7 @@ public class HyphenationCompoundWordTokenFilterFactory extends AbstractCompoundW
         String hyphenationPatternsPath = settings.get("hyphenation_patterns_path", null);
         if (hyphenationPatternsPath == null) {
-            throw new ElasticsearchIllegalArgumentException("hyphenation_patterns_path is a required setting.");
+            throw new IllegalArgumentException("hyphenation_patterns_path is a required setting.");
         }
 
         URL hyphenationPatternsFile = env.resolveConfig(hyphenationPatternsPath);
@@ -63,7 +60,7 @@ public class HyphenationCompoundWordTokenFilterFactory extends AbstractCompoundW
         try {
             hyphenationTree = HyphenationCompoundWordTokenFilter.getHyphenationTree(new InputSource(hyphenationPatternsFile.toExternalForm()));
         } catch (Exception e) {
-            throw new ElasticsearchIllegalArgumentException("Exception while reading hyphenation_patterns_path: " + e.getMessage());
+            throw new IllegalArgumentException("Exception while reading hyphenation_patterns_path: " + e.getMessage());
         }
     }
@@ -77,4 +74,4 @@ public class HyphenationCompoundWordTokenFilterFactory extends AbstractCompoundW
                 minSubwordSize, maxSubwordSize, onlyLongestMatch);
         }
     }
-}
\ No newline at end of file
+}
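Editor's note: the factory above delegates grammar loading to Lucene's own hyphenation loader. A sketch of that call path in isolation, assuming an XML grammar file on disk (the path is illustrative, the `getHyphenationTree(InputSource)` API is the one used in the hunk above):

import org.apache.lucene.analysis.compound.HyphenationCompoundWordTokenFilter;
import org.apache.lucene.analysis.compound.hyphenation.HyphenationTree;
import org.xml.sax.InputSource;

import java.io.File;

public class HyphenationTreeSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative path; in Elasticsearch this comes from the
        // "hyphenation_patterns_path" setting resolved against the config dir.
        File grammar = new File("config/analysis/hyphenation_patterns.xml");
        HyphenationTree tree = HyphenationCompoundWordTokenFilter
                .getHyphenationTree(new InputSource(grammar.toURI().toASCIIString()));
        // Any failure here is rethrown by the factory as
        // IllegalArgumentException("Exception while reading hyphenation_patterns_path: ...").
        System.out.println("loaded hyphenation tree: " + tree);
    }
}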
diff --git a/src/main/java/org/elasticsearch/index/cache/IndexCache.java b/src/main/java/org/elasticsearch/index/cache/IndexCache.java
index 76316b39872..3b71f735c2e 100644
--- a/src/main/java/org/elasticsearch/index/cache/IndexCache.java
+++ b/src/main/java/org/elasticsearch/index/cache/IndexCache.java
@@ -19,19 +19,14 @@ package org.elasticsearch.index.cache;
 
+import org.apache.lucene.search.QueryCachingPolicy;
 import org.apache.lucene.util.IOUtils;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.cluster.ClusterChangedEvent;
-import org.elasticsearch.cluster.ClusterService;
-import org.elasticsearch.cluster.ClusterStateListener;
-import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.AbstractIndexComponent;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
 import org.elasticsearch.index.cache.filter.FilterCache;
-import org.elasticsearch.index.cache.query.parser.QueryParserCache;
 import org.elasticsearch.index.settings.IndexSettings;
 
 import java.io.Closeable;
@@ -40,34 +35,28 @@ import java.io.IOException;
 /**
  *
  */
-public class IndexCache extends AbstractIndexComponent implements Closeable, ClusterStateListener {
+public class IndexCache extends AbstractIndexComponent implements Closeable {
 
     private final FilterCache filterCache;
-    private final QueryParserCache queryParserCache;
+    private final QueryCachingPolicy filterCachingPolicy;
     private final BitsetFilterCache bitsetFilterCache;
 
-    private ClusterService clusterService;
-
     @Inject
-    public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, QueryParserCache queryParserCache, BitsetFilterCache bitsetFilterCache) {
+    public IndexCache(Index index, @IndexSettings Settings indexSettings, FilterCache filterCache, QueryCachingPolicy filterCachingPolicy, BitsetFilterCache bitsetFilterCache) {
         super(index, indexSettings);
         this.filterCache = filterCache;
-        this.queryParserCache = queryParserCache;
+        this.filterCachingPolicy = filterCachingPolicy;
         this.bitsetFilterCache = bitsetFilterCache;
     }
 
-    @Inject(optional = true)
-    public void setClusterService(@Nullable ClusterService clusterService) {
-        this.clusterService = clusterService;
-        if (clusterService != null) {
-            clusterService.add(this);
-        }
-    }
-
     public FilterCache filter() {
         return filterCache;
     }
 
+    public QueryCachingPolicy filterPolicy() {
+        return filterCachingPolicy;
+    }
+
     /**
      * Return the {@link BitsetFilterCache} for this index.
      */
@@ -75,29 +64,14 @@ public class IndexCache extends AbstractIndexComponent implements Closeable, Clu
         return bitsetFilterCache;
     }
 
-    public QueryParserCache queryParserCache() {
-        return this.queryParserCache;
-    }
-
     @Override
     public void close() throws IOException {
-        IOUtils.close(filterCache, queryParserCache, bitsetFilterCache);
-        if (clusterService != null) {
-            clusterService.remove(this);
-        }
+        IOUtils.close(filterCache, bitsetFilterCache);
     }
 
     public void clear(String reason) {
         filterCache.clear(reason);
-        queryParserCache.clear();
         bitsetFilterCache.clear(reason);
     }
 
-    @Override
-    public void clusterChanged(ClusterChangedEvent event) {
-        // clear the query parser cache if the metadata (mappings) changed...
-        if (event.metaDataChanged()) {
-            queryParserCache.clear();
-        }
-    }
 }
diff --git a/src/main/java/org/elasticsearch/index/cache/IndexCacheModule.java b/src/main/java/org/elasticsearch/index/cache/IndexCacheModule.java
index 796ad7388b4..3a0c9fc584e 100644
--- a/src/main/java/org/elasticsearch/index/cache/IndexCacheModule.java
+++ b/src/main/java/org/elasticsearch/index/cache/IndexCacheModule.java
@@ -23,7 +23,6 @@ import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.cache.bitset.BitsetFilterCacheModule;
 import org.elasticsearch.index.cache.filter.FilterCacheModule;
-import org.elasticsearch.index.cache.query.parser.QueryParserCacheModule;
 
 /**
  *
@@ -39,7 +38,6 @@ public class IndexCacheModule extends AbstractModule {
     @Override
     protected void configure() {
         new FilterCacheModule(settings).configure(binder());
-        new QueryParserCacheModule(settings).configure(binder());
         new BitsetFilterCacheModule(settings).configure(binder());
 
         bind(IndexCache.class).asEagerSingleton();
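Editor's note: with the query-parser cache gone, `IndexCache` only exposes the filter cache, its caching policy, and the bitset cache. A sketch of how a searcher would be wired from it (hypothetical helper; the two setters are the Lucene 5 `IndexSearcher` API that `EngineSearcherFactory` further down also uses):

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.search.IndexSearcher;
import org.elasticsearch.index.cache.IndexCache;

public class SearcherWiringSketch {
    // A sketch, assuming an IndexCache instance and an open reader are available.
    static IndexSearcher newSearcher(DirectoryReader reader, IndexCache indexCache) {
        IndexSearcher searcher = new IndexSearcher(reader);
        // FilterCache now *is* a Lucene QueryCache, so it can be installed directly.
        searcher.setQueryCache(indexCache.filter());
        // The policy decides which filters are worth caching (e.g. usage tracking).
        searcher.setQueryCachingPolicy(indexCache.filterPolicy());
        return searcher;
    }
}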
diff --git a/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java b/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java
index de622e4d689..284ecc0ecb7 100644
--- a/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java
+++ b/src/main/java/org/elasticsearch/index/cache/bitset/BitsetFilterCache.java
@@ -23,6 +23,7 @@ import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
+
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.DocIdSet;
@@ -35,20 +36,19 @@ import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.lucene.search.NoCacheFilter;
+import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.AbstractIndexComponent;
 import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.object.ObjectMapper;
-import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
-import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.shard.ShardUtils;
-import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.indices.IndicesWarmer;
 import org.elasticsearch.indices.IndicesWarmer.TerminationHandle;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -104,7 +104,6 @@ public class BitsetFilterCache extends AbstractIndexComponent implements LeafRea
     public BitDocIdSetFilter getBitDocIdSetFilter(Filter filter) {
         assert filter != null;
-        assert !(filter instanceof NoCacheFilter);
         return new BitDocIdSetFilterWrapper(filter);
     }
@@ -114,7 +113,7 @@
     }
 
     @Override
-    public void close() throws ElasticsearchException {
+    public void close() {
         indicesWarmer.removeListener(warmer);
         clear("close");
     }
@@ -266,7 +265,7 @@
         }
 
         if (hasNested) {
-            warmUp.add(NonNestedDocsFilter.INSTANCE);
+            warmUp.add(Queries.newNonNestedFilter());
         }
 
         final Executor executor = threadPool.executor(executor());
diff --git a/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java
index a16b5da2bd9..37c45e3adf7 100644
--- a/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java
+++ b/src/main/java/org/elasticsearch/index/cache/filter/FilterCache.java
@@ -19,19 +19,14 @@ package org.elasticsearch.index.cache.filter;
 
-import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.QueryCachingPolicy;
-import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.lucene.HashedBytesRef;
 import org.elasticsearch.index.IndexComponent;
-import org.elasticsearch.index.IndexService;
 
 import java.io.Closeable;
 
 /**
  *
  */
-public interface FilterCache extends IndexComponent, Closeable {
+public interface FilterCache extends IndexComponent, Closeable, org.apache.lucene.search.QueryCache {
 
     static class EntriesStats {
         public final long sizeInBytes;
@@ -43,16 +38,5 @@ public interface FilterCache extends IndexComponent, Closeable {
         }
     }
 
-    // we need to "inject" the index service to not create cyclic dep
-    void setIndexService(IndexService indexService);
-
-    String type();
-
-    Filter cache(Filter filterToCache, @Nullable HashedBytesRef cacheKey, QueryCachingPolicy policy);
-
-    void clear(Object reader);
-
     void clear(String reason);
-
-    void clear(String reason, String[] keys);
 }
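Editor's note: after this trim, a `FilterCache` is just an index component plus Lucene's `QueryCache` contract - wrap a `Weight`, consult the policy. For reference, a minimal sketch of that contract (hypothetical class name; `NoneFilterCache` further down does exactly this):

import org.apache.lucene.search.QueryCache;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.Weight;

/** Sketch: the smallest possible QueryCache - it never caches anything. */
public class PassThroughQueryCache implements QueryCache {
    @Override
    public Weight doCache(Weight weight, QueryCachingPolicy policy) {
        // Returning the weight unchanged disables caching entirely;
        // a real implementation wraps it and memoizes per-segment doc id sets.
        return weight;
    }
}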
diff --git a/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java b/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java
index 551ea4fa279..20496e3266b 100644
--- a/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java
+++ b/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheModule.java
@@ -24,7 +24,7 @@ import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;
 import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.inject.Scopes;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache;
+import org.elasticsearch.index.cache.filter.index.IndexFilterCache;
 
 /**
  *
@@ -46,7 +46,7 @@ public class FilterCacheModule extends AbstractModule {
     @Override
     protected void configure() {
         bind(FilterCache.class)
-                .to(settings.getAsClass(FilterCacheSettings.FILTER_CACHE_TYPE, WeightedFilterCache.class, "org.elasticsearch.index.cache.filter.", "FilterCache"))
+                .to(settings.getAsClass(FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class, "org.elasticsearch.index.cache.filter.", "FilterCache"))
                 .in(Scopes.SINGLETON);
         // the filter cache is a node-level thing, however we want the most popular filters
         // to be computed on a per-index basis, that is why we don't use the SINGLETON
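Editor's note: the default implementation simply changes from `WeightedFilterCache` to `IndexFilterCache`; the settings-driven lookup stays the same. A sketch of how that resolution behaves - hedged, since the setting key (`index.cache.filter.type` behind `FilterCacheSettings.FILTER_CACHE_TYPE`) and the prefix/suffix expansion of short names are assumptions about the 1.x `Settings#getAsClass` shown above:

import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.cache.filter.FilterCache;
import org.elasticsearch.index.cache.filter.index.IndexFilterCache;

public class FilterCacheTypeSketch {
    public static void main(String[] args) {
        // Unset, this falls back to IndexFilterCache; a short name like "none"
        // is expected to expand to ...cache.filter.none.NoneFilterCache.
        Settings settings = ImmutableSettings.builder()
                .put("index.cache.filter.type", "none")
                .build();
        Class<? extends FilterCache> impl = settings.getAsClass(
                "index.cache.filter.type", IndexFilterCache.class,
                "org.elasticsearch.index.cache.filter.", "FilterCache");
        System.out.println(impl.getName());
    }
}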
diff --git a/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java b/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java
index e56a1145d08..948f7e57702 100644
--- a/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java
+++ b/src/main/java/org/elasticsearch/index/cache/filter/FilterCacheStats.java
@@ -33,32 +33,79 @@ import java.io.IOException;
  */
 public class FilterCacheStats implements Streamable, ToXContent {
 
-    long memorySize;
-    long evictions;
+    long ramBytesUsed;
+    long hitCount;
+    long missCount;
+    long cacheCount;
+    long cacheSize;
 
     public FilterCacheStats() {
     }
 
-    public FilterCacheStats(long memorySize, long evictions) {
-        this.memorySize = memorySize;
-        this.evictions = evictions;
+    public FilterCacheStats(long ramBytesUsed, long hitCount, long missCount, long cacheCount, long cacheSize) {
+        this.ramBytesUsed = ramBytesUsed;
+        this.hitCount = hitCount;
+        this.missCount = missCount;
+        this.cacheCount = cacheCount;
+        this.cacheSize = cacheSize;
     }
 
     public void add(FilterCacheStats stats) {
-        this.memorySize += stats.memorySize;
-        this.evictions += stats.evictions;
+        ramBytesUsed += stats.ramBytesUsed;
+        hitCount += stats.hitCount;
+        missCount += stats.missCount;
+        cacheCount += stats.cacheCount;
+        cacheSize += stats.cacheSize;
     }
 
     public long getMemorySizeInBytes() {
-        return this.memorySize;
+        return ramBytesUsed;
     }
 
     public ByteSizeValue getMemorySize() {
-        return new ByteSizeValue(memorySize);
+        return new ByteSizeValue(ramBytesUsed);
     }
 
+    /**
+     * The total number of lookups in the cache.
+     */
+    public long getTotalCount() {
+        return hitCount + missCount;
+    }
+
+    /**
+     * The number of successful lookups in the cache.
+     */
+    public long getHitCount() {
+        return hitCount;
+    }
+
+    /**
+     * The number of lookups in the cache that failed to retrieve a {@link DocIdSet}.
+     */
+    public long getMissCount() {
+        return missCount;
+    }
+
+    /**
+     * The number of {@link DocIdSet}s that have been cached.
+     */
+    public long getCacheCount() {
+        return cacheCount;
+    }
+
+    /**
+     * The number of {@link DocIdSet}s that are in the cache.
+     */
+    public long getCacheSize() {
+        return cacheSize;
+    }
+
+    /**
+     * The number of {@link DocIdSet}s that have been evicted from the cache.
+     */
     public long getEvictions() {
-        return this.evictions;
+        return cacheCount - cacheSize;
     }
 
     public static FilterCacheStats readFilterCacheStats(StreamInput in) throws IOException {
@@ -67,22 +114,34 @@ public class FilterCacheStats implements Streamable, ToXContent {
         return stats;
     }
 
+    @Override
     public void readFrom(StreamInput in) throws IOException {
-        memorySize = in.readVLong();
-        evictions = in.readVLong();
+        ramBytesUsed = in.readLong();
+        hitCount = in.readLong();
+        missCount = in.readLong();
+        cacheCount = in.readLong();
+        cacheSize = in.readLong();
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeVLong(memorySize);
-        out.writeVLong(evictions);
+        out.writeLong(ramBytesUsed);
+        out.writeLong(hitCount);
+        out.writeLong(missCount);
+        out.writeLong(cacheCount);
+        out.writeLong(cacheSize);
     }
 
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
         builder.startObject(Fields.FILTER_CACHE);
-        builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, memorySize);
+        builder.byteSizeField(Fields.MEMORY_SIZE_IN_BYTES, Fields.MEMORY_SIZE, ramBytesUsed);
+        builder.field(Fields.TOTAL_COUNT, getTotalCount());
+        builder.field(Fields.HIT_COUNT, getHitCount());
+        builder.field(Fields.MISS_COUNT, getMissCount());
+        builder.field(Fields.CACHE_SIZE, getCacheSize());
+        builder.field(Fields.CACHE_COUNT, getCacheCount());
         builder.field(Fields.EVICTIONS, getEvictions());
         builder.endObject();
         return builder;
@@ -92,6 +151,12 @@ public class FilterCacheStats implements Streamable, ToXContent {
         static final XContentBuilderString FILTER_CACHE = new XContentBuilderString("filter_cache");
         static final XContentBuilderString MEMORY_SIZE = new XContentBuilderString("memory_size");
         static final XContentBuilderString MEMORY_SIZE_IN_BYTES = new XContentBuilderString("memory_size_in_bytes");
+        static final XContentBuilderString TOTAL_COUNT = new XContentBuilderString("total_count");
+        static final XContentBuilderString HIT_COUNT = new XContentBuilderString("hit_count");
+        static final XContentBuilderString MISS_COUNT = new XContentBuilderString("miss_count");
+        static final XContentBuilderString CACHE_SIZE = new XContentBuilderString("cache_size");
+        static final XContentBuilderString CACHE_COUNT = new XContentBuilderString("cache_count");
         static final XContentBuilderString EVICTIONS = new XContentBuilderString("evictions");
     }
+
 }
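Editor's note: evictions are no longer counted directly - they are derived from how many sets were ever cached versus how many are still resident. A small sketch of consuming the new stats (the numbers are illustrative):

import org.elasticsearch.index.cache.filter.FilterCacheStats;

public class FilterCacheStatsSketch {
    public static void main(String[] args) {
        // ramBytesUsed, hitCount, missCount, cacheCount, cacheSize
        FilterCacheStats stats = new FilterCacheStats(1024, 80, 20, 10, 7);
        long total = stats.getTotalCount();        // 100 lookups
        double hitRate = total == 0 ? 0 : (double) stats.getHitCount() / total; // 0.80
        long evictions = stats.getEvictions();     // 10 ever cached - 7 resident = 3
        System.out.printf("hit rate %.2f, evictions %d%n", hitRate, evictions);
    }
}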
diff --git a/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCache.java
index 67ab084bd07..97f75094580 100644
--- a/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCache.java
+++ b/src/main/java/org/elasticsearch/index/cache/filter/ShardFilterCache.java
@@ -19,45 +19,35 @@ package org.elasticsearch.index.cache.filter;
 
-import com.google.common.cache.RemovalListener;
-import com.google.common.cache.RemovalNotification;
-import org.apache.lucene.search.DocIdSet;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.lucene.docset.DocIdSets;
-import org.elasticsearch.common.metrics.CounterMetric;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache;
 import org.elasticsearch.index.settings.IndexSettings;
 import org.elasticsearch.index.shard.AbstractIndexShardComponent;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
+
+import java.io.Closeable;
+import java.io.IOException;
 
 /**
  */
-public class ShardFilterCache extends AbstractIndexShardComponent implements RemovalListener<WeightedFilterCache.FilterCacheKey, DocIdSet> {
+public class ShardFilterCache extends AbstractIndexShardComponent implements Closeable {
 
-    final CounterMetric evictionsMetric = new CounterMetric();
-    final CounterMetric totalMetric = new CounterMetric();
+    final IndicesFilterCache cache;
 
     @Inject
-    public ShardFilterCache(ShardId shardId, @IndexSettings Settings indexSettings) {
+    public ShardFilterCache(ShardId shardId, @IndexSettings Settings indexSettings, IndicesFilterCache cache) {
         super(shardId, indexSettings);
+        this.cache = cache;
     }
 
     public FilterCacheStats stats() {
-        return new FilterCacheStats(totalMetric.count(), evictionsMetric.count());
-    }
-
-    public void onCached(long sizeInBytes) {
-        totalMetric.inc(sizeInBytes);
+        return cache.getStats(shardId);
     }
 
     @Override
-    public void onRemoval(RemovalNotification<WeightedFilterCache.FilterCacheKey, DocIdSet> removalNotification) {
-        if (removalNotification.wasEvicted()) {
-            evictionsMetric.inc();
-        }
-        if (removalNotification.getValue() != null) {
-            totalMetric.dec(DocIdSets.sizeInBytes(removalNotification.getValue()));
-        }
+    public void close() throws IOException {
+        cache.onClose(shardId);
     }
+
 }
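Editor's note: per-shard stats now come straight from the node-level `IndicesFilterCache`, keyed by `ShardId`. Rolling shard stats up to an index or node total still goes through `add`; a sketch (the two stats objects stand in for `shardFilterCache.stats()` calls):

import org.elasticsearch.index.cache.filter.FilterCacheStats;

public class StatsRollupSketch {
    public static void main(String[] args) {
        FilterCacheStats total = new FilterCacheStats();
        // In practice these would come from each shard of the index.
        total.add(new FilterCacheStats(2048, 10, 5, 4, 4));
        total.add(new FilterCacheStats(4096, 30, 10, 8, 6));
        System.out.println(total.getMemorySize() + ", evictions=" + total.getEvictions());
    }
}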
diff --git a/src/main/java/org/elasticsearch/index/cache/query/parser/none/NoneQueryParserCache.java b/src/main/java/org/elasticsearch/index/cache/filter/index/IndexFilterCache.java
similarity index 55%
rename from src/main/java/org/elasticsearch/index/cache/query/parser/none/NoneQueryParserCache.java
rename to src/main/java/org/elasticsearch/index/cache/filter/index/IndexFilterCache.java
index 2a9dcd8235d..5dfaf4c7799 100644
--- a/src/main/java/org/elasticsearch/index/cache/query/parser/none/NoneQueryParserCache.java
+++ b/src/main/java/org/elasticsearch/index/cache/filter/index/IndexFilterCache.java
@@ -17,42 +17,47 @@
  * under the License.
  */
 
-package org.elasticsearch.index.cache.query.parser.none;
+package org.elasticsearch.index.cache.filter.index;
 
-import org.apache.lucene.queryparser.classic.QueryParserSettings;
-import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryCachingPolicy;
+import org.apache.lucene.search.Weight;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.AbstractIndexComponent;
 import org.elasticsearch.index.Index;
-import org.elasticsearch.index.cache.query.parser.QueryParserCache;
+import org.elasticsearch.index.cache.filter.FilterCache;
 import org.elasticsearch.index.settings.IndexSettings;
+import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
 
 /**
- *
+ * The index-level filter cache. This class mostly delegates to the node-level
+ * filter cache: {@link IndicesFilterCache}.
  */
-public class NoneQueryParserCache extends AbstractIndexComponent implements QueryParserCache {
+public class IndexFilterCache extends AbstractIndexComponent implements FilterCache {
+
+    final IndicesFilterCache indicesFilterCache;
 
     @Inject
-    public NoneQueryParserCache(Index index, @IndexSettings Settings indexSettings) {
+    public IndexFilterCache(Index index, @IndexSettings Settings indexSettings, IndicesFilterCache indicesFilterCache) {
         super(index, indexSettings);
-    }
-
-    @Override
-    public Query get(QueryParserSettings queryString) {
-        return null;
-    }
-
-    @Override
-    public void put(QueryParserSettings queryString, Query query) {
-    }
-
-    @Override
-    public void clear() {
+        this.indicesFilterCache = indicesFilterCache;
     }
 
     @Override
     public void close() throws ElasticsearchException {
+        clear("close");
     }
+
+    @Override
+    public void clear(String reason) {
+        logger.debug("full cache clear, reason [{}]", reason);
+        indicesFilterCache.clearIndex(index.getName());
+    }
+
+    @Override
+    public Weight doCache(Weight weight, QueryCachingPolicy policy) {
+        return indicesFilterCache.doCache(weight, policy);
+    }
+
 }
diff --git a/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java
index 41a704a9afd..ded3c207a42 100644
--- a/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java
+++ b/src/main/java/org/elasticsearch/index/cache/filter/none/NoneFilterCache.java
@@ -19,15 +19,12 @@ package org.elasticsearch.index.cache.filter.none;
 
-import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.QueryCachingPolicy;
-import org.elasticsearch.common.Nullable;
+import org.apache.lucene.search.Weight;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.lucene.HashedBytesRef;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.AbstractIndexComponent;
 import org.elasticsearch.index.Index;
-import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.cache.filter.FilterCache;
 import org.elasticsearch.index.settings.IndexSettings;
 
@@ -42,38 +39,18 @@ public class NoneFilterCache extends AbstractIndexComponent implements FilterCac
         logger.debug("Using no filter cache");
     }
 
-    @Override
-    public void setIndexService(IndexService indexService) {
-        // nothing to do here...
-    }
-
-    @Override
-    public String type() {
-        return "none";
-    }
-
     @Override
     public void close() {
         // nothing to do here
     }
 
     @Override
-    public Filter cache(Filter filterToCache, @Nullable HashedBytesRef cacheKey, QueryCachingPolicy policy) {
-        return filterToCache;
+    public Weight doCache(Weight weight, QueryCachingPolicy policy) {
+        return weight;
     }
 
     @Override
     public void clear(String reason) {
         // nothing to do here
     }
-
-    @Override
-    public void clear(String reason, String[] keys) {
-        // nothing to do there
-    }
-
-    @Override
-    public void clear(Object reader) {
-        // nothing to do here
-    }
 }
diff --git a/src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java b/src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java
deleted file mode 100644
index 2720d32d9d1..00000000000
--- a/src/main/java/org/elasticsearch/index/cache/filter/weighted/WeightedFilterCache.java
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.cache.filter.weighted;
-
-import com.google.common.cache.Cache;
-import com.google.common.cache.RemovalListener;
-import com.google.common.cache.Weigher;
-
-import org.apache.lucene.index.IndexReader;
-import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.index.SegmentReader;
-import org.apache.lucene.search.BitsFilteredDocIdSet;
-import org.apache.lucene.search.DocIdSet;
-import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.QueryCachingPolicy;
-import org.apache.lucene.util.Bits;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.lucene.HashedBytesRef;
-import org.elasticsearch.common.lucene.docset.DocIdSets;
-import org.elasticsearch.common.lucene.search.CachedFilter;
-import org.elasticsearch.common.lucene.search.NoCacheFilter;
-import org.elasticsearch.common.lucene.search.ResolvableFilter;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
-import org.elasticsearch.index.AbstractIndexComponent;
-import org.elasticsearch.index.Index;
-import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.cache.filter.FilterCache;
-import org.elasticsearch.index.settings.IndexSettings;
-import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.index.shard.ShardUtils;
-import org.elasticsearch.indices.cache.filter.IndicesFilterCache;
-
-import java.io.IOException;
-import java.util.concurrent.ConcurrentMap;
-
-public class WeightedFilterCache extends AbstractIndexComponent implements FilterCache, SegmentReader.CoreClosedListener, IndexReader.ReaderClosedListener {
-
-    final IndicesFilterCache indicesFilterCache;
-    IndexService indexService;
-
-    final ConcurrentMap<Object, Boolean> seenReaders = ConcurrentCollections.newConcurrentMap();
-
-    @Inject
-    public WeightedFilterCache(Index index, @IndexSettings Settings indexSettings, IndicesFilterCache indicesFilterCache) {
-        super(index, indexSettings);
-        this.indicesFilterCache = indicesFilterCache;
-    }
-
-    @Override
-    public void setIndexService(IndexService indexService) {
-        this.indexService = indexService;
-    }
-
-    @Override
-    public String type() {
-        return "weighted";
-    }
-
-    @Override
-    public void close() throws ElasticsearchException {
-        clear("close");
-    }
-
-    @Override
-    public void onClose(IndexReader reader) {
-        clear(reader.getCoreCacheKey());
-    }
-
-    @Override
-    public void clear(String reason) {
-        logger.debug("full cache clear, reason [{}]", reason);
-        for (Object readerKey : seenReaders.keySet()) {
-            Boolean removed = seenReaders.remove(readerKey);
-            if (removed == null) {
-                return;
-            }
-            indicesFilterCache.addReaderKeyToClean(readerKey);
-        }
-    }
-
-    @Override
-    public void clear(String reason, String[] keys) {
logger.debug("clear keys [], reason [{}]", reason, keys); - for (String key : keys) { - final HashedBytesRef keyBytes = new HashedBytesRef(key); - for (Object readerKey : seenReaders.keySet()) { - indicesFilterCache.cache().invalidate(new FilterCacheKey(readerKey, keyBytes)); - } - } - } - - @Override - public void onClose(Object coreKey) { - clear(coreKey); - } - - @Override - public void clear(Object coreCacheKey) { - // we add the seen reader before we add the first cache entry for this reader - // so, if we don't see it here, its won't be in the cache - Boolean removed = seenReaders.remove(coreCacheKey); - if (removed == null) { - return; - } - indicesFilterCache.addReaderKeyToClean(coreCacheKey); - } - - @Override - public Filter cache(Filter filterToCache, @Nullable HashedBytesRef cacheKey, QueryCachingPolicy cachePolicy) { - if (filterToCache == null) { - return null; - } - if (filterToCache instanceof NoCacheFilter) { - return filterToCache; - } - if (CachedFilter.isCached(filterToCache)) { - return filterToCache; - } - if (filterToCache instanceof ResolvableFilter) { - throw new IllegalArgumentException("Cannot cache instances of ResolvableFilter: " + filterToCache); - } - return new FilterCacheFilterWrapper(filterToCache, cacheKey, cachePolicy, this); - } - - static class FilterCacheFilterWrapper extends CachedFilter { - - private final Filter filter; - private final Object filterCacheKey; - private final QueryCachingPolicy cachePolicy; - private final WeightedFilterCache cache; - - FilterCacheFilterWrapper(Filter filter, Object cacheKey, QueryCachingPolicy cachePolicy, WeightedFilterCache cache) { - this.filter = filter; - this.filterCacheKey = cacheKey != null ? cacheKey : filter; - this.cachePolicy = cachePolicy; - this.cache = cache; - } - - @Override - public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException { - if (context.ord == 0) { - cachePolicy.onUse(filter); - } - FilterCacheKey cacheKey = new FilterCacheKey(context.reader().getCoreCacheKey(), filterCacheKey); - Cache innerCache = cache.indicesFilterCache.cache(); - - DocIdSet cacheValue = innerCache.getIfPresent(cacheKey); - final DocIdSet ret; - if (cacheValue != null) { - ret = cacheValue; - } else { - final DocIdSet uncached = filter.getDocIdSet(context, null); - if (cachePolicy.shouldCache(filter, context)) { - if (!cache.seenReaders.containsKey(context.reader().getCoreCacheKey())) { - Boolean previous = cache.seenReaders.putIfAbsent(context.reader().getCoreCacheKey(), Boolean.TRUE); - if (previous == null) { - // we add a core closed listener only, for non core IndexReaders we rely on clear being called (percolator for example) - context.reader().addCoreClosedListener(cache); - } - } - // we can't pass down acceptedDocs provided, because we are caching the result, and acceptedDocs - // might be specific to a query. 
-                    // might be specific to a query. We don't pass the live docs either because a cache built for a specific
-                    // generation of a segment might be reused by an older generation which has fewer deleted documents
-                    cacheValue = DocIdSets.toCacheable(context.reader(), uncached);
-                    // we might put the same one concurrently, that's fine, it will be replaced and the removal
-                    // will be called
-                    ShardId shardId = ShardUtils.extractShardId(context.reader());
-                    if (shardId != null) {
-                        IndexShard shard = cache.indexService.shard(shardId.id());
-                        if (shard != null) {
-                            cacheKey.removalListener = shard.filterCache();
-                            shard.filterCache().onCached(DocIdSets.sizeInBytes(cacheValue));
-                        }
-                    }
-                    innerCache.put(cacheKey, cacheValue);
-                    ret = cacheValue;
-                } else {
-                    // uncached
-                    ret = uncached;
-                }
-            }
-
-            return BitsFilteredDocIdSet.wrap(DocIdSets.isEmpty(ret) ? null : ret, acceptDocs);
-        }
-
-        @Override
-        public String toString(String field) {
-            return "cache(" + filter + ")";
-        }
-
-        @Override
-        public boolean equals(Object o) {
-            if (!(o instanceof FilterCacheFilterWrapper)) return false;
-            return this.filter.equals(((FilterCacheFilterWrapper) o).filter);
-        }
-
-        @Override
-        public int hashCode() {
-            return filter.hashCode() ^ 0x1117BF25;
-        }
-    }
-
-
-    /** A weigher for the Guava filter cache that uses a minimum entry size */
-    public static class FilterCacheValueWeigher implements Weigher<FilterCacheKey, DocIdSet> {
-
-        private final int minimumEntrySize;
-
-        public FilterCacheValueWeigher(int minimumEntrySize) {
-            this.minimumEntrySize = minimumEntrySize;
-        }
-
-        @Override
-        public int weigh(FilterCacheKey key, DocIdSet value) {
-            int weight = (int) Math.min(DocIdSets.sizeInBytes(value), Integer.MAX_VALUE);
-            return Math.max(weight, this.minimumEntrySize);
-        }
-    }
-
-    public static class FilterCacheKey {
-        private final Object readerKey;
-        private final Object filterKey;
-
-        // if we know, we will try and set the removal listener (for statistics)
-        // its ok that its not volatile because we make sure we only set it when the object is created before its shared between threads
-        @Nullable
-        public RemovalListener<FilterCacheKey, DocIdSet> removalListener;
-
-        public FilterCacheKey(Object readerKey, Object filterKey) {
-            this.readerKey = readerKey;
-            this.filterKey = filterKey;
-        }
-
-        public Object readerKey() {
-            return readerKey;
-        }
-
-        public Object filterKey() {
-            return filterKey;
-        }
-
-        @Override
-        public boolean equals(Object o) {
-            if (this == o) return true;
-//            if (o == null || getClass() != o.getClass()) return false;
-            FilterCacheKey that = (FilterCacheKey) o;
-            return (readerKey().equals(that.readerKey()) && filterKey.equals(that.filterKey));
-        }
-
-        @Override
-        public int hashCode() {
-            return readerKey().hashCode() + 31 * filterKey.hashCode();
-        }
-    }
-}
diff --git a/src/main/java/org/elasticsearch/index/cache/query/ShardQueryCache.java b/src/main/java/org/elasticsearch/index/cache/query/ShardQueryCache.java
index e7246d0e942..808542fadc4 100644
--- a/src/main/java/org/elasticsearch/index/cache/query/ShardQueryCache.java
+++ b/src/main/java/org/elasticsearch/index/cache/query/ShardQueryCache.java
@@ -21,14 +21,10 @@ package org.elasticsearch.index.cache.query;
 import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
-import org.apache.lucene.search.DocIdSet;
-import org.elasticsearch.common.bytes.BytesReference;
+
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.lucene.docset.DocIdSets;
 import org.elasticsearch.common.metrics.CounterMetric;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.index.cache.filter.FilterCacheStats;
-import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache;
 import org.elasticsearch.index.settings.IndexSettings;
 import org.elasticsearch.index.shard.AbstractIndexShardComponent;
 import org.elasticsearch.index.shard.ShardId;
diff --git a/src/main/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCache.java b/src/main/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCache.java
deleted file mode 100644
index 7e090b62659..00000000000
--- a/src/main/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCache.java
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.cache.query.parser.resident;
-
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import org.apache.lucene.queryparser.classic.QueryParserSettings;
-import org.apache.lucene.search.Query;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.index.AbstractIndexComponent;
-import org.elasticsearch.index.Index;
-import org.elasticsearch.index.cache.query.parser.QueryParserCache;
-import org.elasticsearch.index.settings.IndexSettings;
-
-import java.util.concurrent.TimeUnit;
-
-/**
- * A small (by default) query parser cache mainly to not parse the same query string several times
- * if several shards exists on the same node.
- */
-public class ResidentQueryParserCache extends AbstractIndexComponent implements QueryParserCache {
-
-    private final Cache<QueryParserSettings, Query> cache;
-
-    private volatile int maxSize;
-    private volatile TimeValue expire;
-
-    @Inject
-    public ResidentQueryParserCache(Index index, @IndexSettings Settings indexSettings) {
-        super(index, indexSettings);
-
-        this.maxSize = indexSettings.getAsInt("index.cache.query.parser.resident.max_size", 100);
-        this.expire = indexSettings.getAsTime("index.cache.query.parser.resident.expire", null);
-        logger.debug("using [resident] query cache with max_size [{}], expire [{}]", maxSize, expire);
-
-        CacheBuilder cacheBuilder = CacheBuilder.newBuilder().maximumSize(maxSize);
-        if (expire != null) {
-            cacheBuilder.expireAfterAccess(expire.nanos(), TimeUnit.NANOSECONDS);
-        }
-
-        this.cache = cacheBuilder.build();
-    }
-
-    @Override
-    public Query get(QueryParserSettings queryString) {
-        Query value = cache.getIfPresent(queryString);
-        if (value != null) {
-            return value.clone();
-        } else {
-            return null;
-        }
-    }
-
-    @Override
-    public void put(QueryParserSettings queryString, Query query) {
-        if (queryString.isCacheable()) {
-            cache.put(queryString, query);
-        }
-    }
-
-    @Override
-    public void clear() {
-        cache.invalidateAll();
-    }
-
-    @Override
-    public void close() throws ElasticsearchException {
-        cache.invalidateAll();
-    }
-}
diff --git a/src/main/java/org/elasticsearch/index/codec/CodecService.java b/src/main/java/org/elasticsearch/index/codec/CodecService.java
index cd1940eb8da..bd152052651 100644
--- a/src/main/java/org/elasticsearch/index/codec/CodecService.java
+++ b/src/main/java/org/elasticsearch/index/codec/CodecService.java
@@ -24,7 +24,6 @@ import com.google.common.collect.ImmutableMap;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.codecs.lucene50.Lucene50Codec;
 import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat.Mode;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.collect.MapBuilder;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.ImmutableSettings;
@@ -86,10 +85,10 @@ public class CodecService extends AbstractIndexComponent {
         return mapperService;
     }
 
-    public Codec codec(String name) throws ElasticsearchIllegalArgumentException {
+    public Codec codec(String name) {
         Codec codec = codecs.get(name);
         if (codec == null) {
-            throw new ElasticsearchIllegalArgumentException("failed to find codec [" + name + "]");
+            throw new IllegalArgumentException("failed to find codec [" + name + "]");
         }
         return codec;
     }
diff --git a/src/main/java/org/elasticsearch/index/engine/CommitStats.java b/src/main/java/org/elasticsearch/index/engine/CommitStats.java
index d1e4ed7a2b2..de0474c5d7e 100644
--- a/src/main/java/org/elasticsearch/index/engine/CommitStats.java
+++ b/src/main/java/org/elasticsearch/index/engine/CommitStats.java
@@ -78,7 +78,7 @@ public final class CommitStats implements Streamable, ToXContent {
     public void readFrom(StreamInput in) throws IOException {
         MapBuilder<String, String> builder = MapBuilder.newMapBuilder();
         for (int i = in.readVInt(); i > 0; i--) {
-            builder.put(in.readString(), in.readOptionalString());
+            builder.put(in.readString(), in.readString());
         }
         userData = builder.immutableMap();
         generation = in.readLong();
@@ -90,7 +90,7 @@ public final class CommitStats implements Streamable, ToXContent {
         out.writeVInt(userData.size());
         for (Map.Entry<String, String> entry : userData.entrySet()) {
             out.writeString(entry.getKey());
-            out.writeOptionalString(entry.getValue());
+            out.writeString(entry.getValue());
         }
         out.writeLong(generation);
         out.writeString(id);
diff --git a/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java b/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java
index 9c12348d90d..d555cbc1a43 100644
--- a/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java
+++ b/src/main/java/org/elasticsearch/index/engine/DeleteByQueryFailedEngineException.java
@@ -21,12 +21,11 @@ package org.elasticsearch.index.engine;
 
 import org.elasticsearch.index.shard.ShardId;
 
-/**
- *
- */
+/** @deprecated Delete-by-query is removed in 2.0, but we keep this so translog can replay on upgrade. */
+@Deprecated
 public class DeleteByQueryFailedEngineException extends EngineException {
 
     public DeleteByQueryFailedEngineException(ShardId shardId, Engine.DeleteByQuery deleteByQuery, Throwable cause) {
         super(shardId, "Delete by query failed for [" + deleteByQuery.query() + "]", cause);
     }
-}
\ No newline at end of file
+}
diff --git a/src/main/java/org/elasticsearch/index/engine/Engine.java b/src/main/java/org/elasticsearch/index/engine/Engine.java
index cd72d789c70..dc17c856031 100644
--- a/src/main/java/org/elasticsearch/index/engine/Engine.java
+++ b/src/main/java/org/elasticsearch/index/engine/Engine.java
@@ -19,6 +19,7 @@ package org.elasticsearch.index.engine;
 
+import com.google.common.base.Preconditions;
 import org.apache.lucene.index.*;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.IndexSearcher;
@@ -27,11 +28,8 @@ import org.apache.lucene.search.SearcherManager;
 import org.apache.lucene.search.join.BitDocIdSetFilter;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Accountables;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchIllegalStateException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.Preconditions;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.lease.Releasables;
@@ -47,6 +45,7 @@ import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.ParseContext.Document;
 import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.index.mapper.Uid;
+import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.translog.Translog;
@@ -106,7 +105,7 @@ public abstract class Engine implements Closeable {
     /**
      * Tries to extract a segment reader from the given index reader.
-     * If no SegmentReader can be extracted an {@link org.elasticsearch.ElasticsearchIllegalStateException} is thrown.
+     * If no SegmentReader can be extracted an {@link IllegalStateException} is thrown.
      */
    protected static SegmentReader segmentReader(LeafReader reader) {
        if (reader instanceof SegmentReader) {
@@ -116,7 +115,7 @@
            return segmentReader(FilterLeafReader.unwrap(fReader));
        }
        // hard fail - we can't get a SegmentReader
-        throw new ElasticsearchIllegalStateException("Can not extract segment reader from given index reader [" + reader + "]");
+        throw new IllegalStateException("Can not extract segment reader from given index reader [" + reader + "]");
    }
 
    /**
@@ -202,10 +201,12 @@
    public abstract void create(Create create) throws EngineException;
 
-    public abstract void index(Index index) throws EngineException;
+    public abstract boolean index(Index index) throws EngineException;
 
    public abstract void delete(Delete delete) throws EngineException;
 
+    /** @deprecated This was removed, but we keep this API so translog can replay any DBQs on upgrade. */
+    @Deprecated
    public abstract void delete(DeleteByQuery delete) throws EngineException;
 
    /**
@@ -557,11 +558,11 @@
     */
    public static interface RecoveryHandler {
 
-        void phase1(SnapshotIndexCommit snapshot) throws ElasticsearchException;
+        void phase1(SnapshotIndexCommit snapshot);
 
-        void phase2(Translog.Snapshot snapshot) throws ElasticsearchException;
+        void phase2(Translog.Snapshot snapshot);
 
-        void phase3(Translog.Snapshot snapshot) throws ElasticsearchException;
+        void phase3(Translog.Snapshot snapshot);
    }
 
    public static class Searcher implements Releasable {
@@ -590,7 +591,7 @@
        }
 
        @Override
-        public void close() throws ElasticsearchException {
+        public void close() {
            // Nothing to close here
        }
    }
@@ -724,6 +725,12 @@
        public long endTime() {
            return this.endTime;
        }
+
+        /**
+         * Execute this operation against the provided {@link IndexShard} and
+         * return whether the document was created.
+         */
+        public abstract boolean execute(IndexShard shard);
    }
 
    public static final class Create extends IndexingOperation {
@@ -752,10 +759,15 @@
        public boolean autoGeneratedId() {
            return this.autoGeneratedId;
        }
+
+        @Override
+        public boolean execute(IndexShard shard) {
+            shard.create(this);
+            return true;
+        }
    }
 
    public static final class Index extends IndexingOperation {
-        private boolean created;
 
        public Index(DocumentMapper docMapper, Term uid, ParsedDocument doc, long version, VersionType versionType, Origin origin, long startTime, boolean canHaveDuplicates) {
            super(docMapper, uid, doc, version, versionType, origin, startTime, canHaveDuplicates);
@@ -774,15 +786,9 @@
            return Type.INDEX;
        }
 
-        /**
-         * @return true if object was created
-         */
-        public boolean created() {
-            return created;
-        }
-
-        public void created(boolean created) {
-            this.created = created;
+        @Override
+        public boolean execute(IndexShard shard) {
+            return shard.index(this);
        }
    }
 
@@ -1057,7 +1063,7 @@
    protected abstract SearcherManager getSearcherManager();
 
-    protected abstract void closeNoLock(String reason) throws ElasticsearchException;
+    protected abstract void closeNoLock(String reason);
 
    public void flushAndClose() throws IOException {
        if (isClosed.get() == false) {
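Editor's note: `Index` operations now report whether they created a fresh document, and `Create`/`Index` know how to run themselves against a shard. A sketch of the double-dispatch this enables on the write path (the caller is hypothetical; `execute` is the new API above):

import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.shard.IndexShard;

public class OperationDispatchSketch {
    /**
     * Callers no longer need an instanceof chain over Create/Index: each
     * IndexingOperation executes itself and reports whether a new document
     * was created (always true for Create, the engine's answer for Index).
     */
    static boolean apply(Engine.IndexingOperation operation, IndexShard shard) {
        return operation.execute(shard);
    }
}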
diff --git a/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
index ac072115cc7..9c069139173 100644
--- a/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
+++ b/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
@@ -21,6 +21,8 @@ package org.elasticsearch.index.engine;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.search.QueryCache;
+import org.apache.lucene.search.QueryCachingPolicy;
 import org.apache.lucene.search.similarities.Similarity;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.settings.Settings;
@@ -74,6 +76,8 @@
     private final Similarity similarity;
     private final CodecService codecService;
     private final Engine.FailedEngineListener failedEngineListener;
+    private final QueryCache filterCache;
+    private final QueryCachingPolicy filterCachingPolicy;
 
     /**
      * Index setting for index concurrency / number of threadstates in the indexwriter.
@@ -130,7 +134,11 @@
     /**
      * Creates a new {@link org.elasticsearch.index.engine.EngineConfig}
      */
-    public EngineConfig(ShardId shardId, ThreadPool threadPool, ShardIndexingService indexingService, IndexSettingsService indexSettingsService, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy, Translog translog, MergePolicyProvider mergePolicyProvider, MergeSchedulerProvider mergeScheduler, Analyzer analyzer, Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener, TranslogRecoveryPerformer translogRecoveryPerformer) {
+    public EngineConfig(ShardId shardId, ThreadPool threadPool, ShardIndexingService indexingService,
+                        IndexSettingsService indexSettingsService, IndicesWarmer warmer, Store store, SnapshotDeletionPolicy deletionPolicy,
+                        Translog translog, MergePolicyProvider mergePolicyProvider, MergeSchedulerProvider mergeScheduler, Analyzer analyzer,
+                        Similarity similarity, CodecService codecService, Engine.FailedEngineListener failedEngineListener,
+                        TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache filterCache, QueryCachingPolicy filterCachingPolicy) {
         this.shardId = shardId;
         this.threadPool = threadPool;
         this.indexingService = indexingService;
@@ -155,6 +163,8 @@
         versionMapSizeSetting = indexSettings.get(INDEX_VERSION_MAP_SIZE, DEFAULT_VERSION_MAP_SIZE);
         updateVersionMapSize();
         this.translogRecoveryPerformer = translogRecoveryPerformer;
+        this.filterCache = filterCache;
+        this.filterCachingPolicy = filterCachingPolicy;
     }
 
     /** updates {@link #versionMapSize} based on current setting and {@link #indexingBufferSize} */
@@ -396,4 +406,18 @@
     public TranslogRecoveryPerformer getTranslogRecoveryPerformer() {
         return translogRecoveryPerformer;
     }
+
+    /**
+     * Return the cache to use for filters.
+     */
+    public QueryCache getFilterCache() {
+        return filterCache;
+    }
+
+    /**
+     * Return the policy to use when caching filters.
+     */
+    public QueryCachingPolicy getFilterCachingPolicy() {
+        return filterCachingPolicy;
+    }
 }
diff --git a/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java b/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java
index 53bff1d8ef6..3384f78433f 100644
--- a/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java
+++ b/src/main/java/org/elasticsearch/index/engine/EngineSearcher.java
@@ -23,9 +23,7 @@ import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.SearcherManager;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchIllegalStateException;
 import org.elasticsearch.common.logging.ESLogger;
-import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.store.Store;
 
 import java.io.IOException;
@@ -48,19 +46,19 @@
     }
 
     @Override
-    public void close() throws ElasticsearchException {
+    public void close() {
         if (!released.compareAndSet(false, true)) {
             /* In general, searchers should never be released twice or this would break reference counting. There is one rare case
              * when it might happen though: when the request and the Reaper thread would both try to release it in a very short amount
              * of time, this is why we only log a warning instead of throwing an exception.
              */
-            logger.warn("Searcher was released twice", new ElasticsearchIllegalStateException("Double release"));
+            logger.warn("Searcher was released twice", new IllegalStateException("Double release"));
             return;
         }
         try {
             manager.release(this.searcher());
         } catch (IOException e) {
-            throw new ElasticsearchIllegalStateException("Cannot close", e);
+            throw new IllegalStateException("Cannot close", e);
         } catch (AlreadyClosedException e) {
             /* this one can happen if we already closed the
              * underlying store / directory and we call into the
diff --git a/src/main/java/org/elasticsearch/index/engine/EngineSearcherFactory.java b/src/main/java/org/elasticsearch/index/engine/EngineSearcherFactory.java
index 7255b686bc8..fa8d9a6a5c1 100644
--- a/src/main/java/org/elasticsearch/index/engine/EngineSearcherFactory.java
+++ b/src/main/java/org/elasticsearch/index/engine/EngineSearcherFactory.java
@@ -40,7 +40,9 @@
     @Override
     public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException {
-        IndexSearcher searcher = new IndexSearcher(reader);
+        IndexSearcher searcher = super.newSearcher(reader, previousReader);
+        searcher.setQueryCache(engineConfig.getFilterCache());
+        searcher.setQueryCachingPolicy(engineConfig.getFilterCachingPolicy());
         searcher.setSimilarity(engineConfig.getSimilarity());
         return searcher;
     }
diff --git a/src/main/java/org/elasticsearch/index/engine/FlushingRecoveryCounter.java b/src/main/java/org/elasticsearch/index/engine/FlushingRecoveryCounter.java
index 3534ef9268e..223fa306697 100644
--- a/src/main/java/org/elasticsearch/index/engine/FlushingRecoveryCounter.java
+++ b/src/main/java/org/elasticsearch/index/engine/FlushingRecoveryCounter.java
@@ -40,7 +40,7 @@
     }
 
     @Override
-    int endRecovery() throws ElasticsearchException {
+    int endRecovery() {
         int left = super.endRecovery();
         if (left == 0) {
             try {
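Editor's note: `EngineSearcherFactory` above is the single funnel where engine-level cache and policy get attached to every new searcher. A standalone sketch of the same pattern, for readers outside this codebase (class name hypothetical; the `SearcherFactory` override is the Lucene 5 API used in the hunk):

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.QueryCache;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.SearcherFactory;

import java.io.IOException;

/** Sketch: a SearcherFactory that installs a shared cache and policy on each searcher. */
public class CachingSearcherFactory extends SearcherFactory {
    private final QueryCache cache;          // e.g. config.getFilterCache()
    private final QueryCachingPolicy policy; // e.g. config.getFilterCachingPolicy()

    public CachingSearcherFactory(QueryCache cache, QueryCachingPolicy policy) {
        this.cache = cache;
        this.policy = policy;
    }

    @Override
    public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException {
        IndexSearcher searcher = super.newSearcher(reader, previousReader);
        searcher.setQueryCache(cache);
        searcher.setQueryCachingPolicy(policy);
        return searcher;
    }
}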
diff --git a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index ac2fca4e3da..1a717416a87 100644
--- a/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -364,15 +364,16 @@
     }
 
     @Override
-    public void index(Index index) throws EngineException {
+    public boolean index(Index index) throws EngineException {
+        final boolean created;
         try (ReleasableLock lock = readLock.acquire()) {
             ensureOpen();
             if (index.origin() == Operation.Origin.RECOVERY) {
                 // Don't throttle recovery operations
-                innerIndex(index);
+                created = innerIndex(index);
             } else {
                 try (Releasable r = throttle.acquireThrottle()) {
-                    innerIndex(index);
+                    created = innerIndex(index);
                 }
             }
             flushNeeded = true;
@@ -381,6 +382,7 @@
             throw new IndexFailedEngineException(shardId, index, t);
         }
         checkVersionMapRefresh();
+        return created;
     }
 
     /**
@@ -410,7 +412,7 @@
         }
     }
 
-    private void innerIndex(Index index) throws IOException {
+    private boolean innerIndex(Index index) throws IOException {
         synchronized (dirtyLock(index.uid())) {
             final long currentVersion;
             VersionValue versionValue = versionMap.getUnderLock(index.uid().bytes());
@@ -428,17 +430,18 @@
             long expectedVersion = index.version();
             if (index.versionType().isVersionConflictForWrites(currentVersion, expectedVersion)) {
                 if (index.origin() == Operation.Origin.RECOVERY) {
-                    return;
+                    return false;
                 } else {
                     throw new VersionConflictEngineException(shardId, index.type(), index.id(), currentVersion, expectedVersion);
                 }
             }
             updatedVersion = index.versionType().updateVersion(currentVersion, expectedVersion);
 
+            final boolean created;
             index.updateVersion(updatedVersion);
             if (currentVersion == Versions.NOT_FOUND) {
                 // document does not exists, we can optimize for create
-                index.created(true);
+                created = true;
                 if (index.docs().size() > 1) {
                     indexWriter.addDocuments(index.docs());
                 } else {
@@ -446,7 +449,9 @@
             } else {
                 if (versionValue != null) {
-                    index.created(versionValue.delete()); // we have a delete which is not GC'ed...
+                    created = versionValue.delete(); // we have a delete which is not GC'ed...
+                } else {
+                    created = false;
                 }
                 if (index.docs().size() > 1) {
                     indexWriter.updateDocuments(index.uid(), index.docs());
@@ -459,6 +464,7 @@
             versionMap.putUnderLock(index.uid().bytes(), new VersionValue(updatedVersion, translogLocation));
 
             indexingService.postIndexUnderLock(index);
+            return created;
         }
     }
 
@@ -531,6 +537,8 @@
         }
     }
 
+    /** @deprecated This was removed, but we keep this API so translog can replay any DBQs on upgrade. */
+    @Deprecated
     @Override
     public void delete(DeleteByQuery delete) throws EngineException {
         try (ReleasableLock lock = readLock.acquire()) {
@@ -979,7 +987,7 @@
      * is failed.
      */
     @Override
-    protected final void closeNoLock(String reason) throws ElasticsearchException {
+    protected final void closeNoLock(String reason) {
         if (isClosed.compareAndSet(false, true)) {
             assert rwl.isWriteLockedByCurrentThread() || failEngineLock.isHeldByCurrentThread() : "Either the write lock must be held or the engine must be currently be failing itself";
             try {
@@ -1077,7 +1085,7 @@
                 try {
                     assert isMergedSegment(reader);
                     if (warmer != null) {
-                        final Engine.Searcher searcher = new Searcher("warmer", new IndexSearcher(reader));
+                        final Engine.Searcher searcher = new Searcher("warmer", searcherFactory.newSearcher(reader, null));
                         final IndicesWarmer.WarmerContext context = new IndicesWarmer.WarmerContext(shardId, searcher);
                         warmer.warmNewReaders(context);
                     }
@@ -1110,8 +1118,7 @@
         @Override
         public IndexSearcher newSearcher(IndexReader reader, IndexReader previousReader) throws IOException {
-            IndexSearcher searcher = new IndexSearcher(reader);
-            searcher.setSimilarity(engineConfig.getSimilarity());
+            IndexSearcher searcher = super.newSearcher(reader, previousReader);
             if (warmer != null) {
                 // we need to pass a custom searcher that does not release anything on Engine.Search Release,
                 // we will release explicitly
@@ -1143,7 +1150,8 @@
                 }
                 if (!readers.isEmpty()) {
                     // we don't want to close the inner readers, just increase ref on them
-                    newSearcher = new IndexSearcher(new MultiReader(readers.toArray(new IndexReader[readers.size()]), false));
+                    IndexReader newReader = new MultiReader(readers.toArray(new IndexReader[readers.size()]), false);
+                    newSearcher = super.newSearcher(newReader, null);
                     closeNewSearcher = true;
                 }
             }
@@ -1255,7 +1263,7 @@
                     }
                 }
             } catch (FileNotFoundException ex) {
translog file found for ID: " + translogId); + logger.debug("no translog file found for ID: " + translogId); } catch (TruncatedTranslogException e) { // file is empty or header has been half-written and should be ignored logger.trace("ignoring truncation exception, the translog is either empty or half-written", e); diff --git a/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java b/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java index cee6dd5b8a0..28401496456 100644 --- a/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java +++ b/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java @@ -52,7 +52,7 @@ public class RecoveryCounter implements Releasable { * End the recovery counter by decrementing the store's ref and the ongoing recovery counter * @return number of ongoing recoveries remaining */ - int endRecovery() throws ElasticsearchException { + int endRecovery() { store.decRef(); int left = onGoingRecoveries.decrementAndGet(); assert onGoingRecoveries.get() >= 0 : "ongoingRecoveries must be >= 0 but was: " + onGoingRecoveries.get(); @@ -60,7 +60,7 @@ public class RecoveryCounter implements Releasable { } @Override - public void close() throws ElasticsearchException { + public void close() { endRecovery(); } } diff --git a/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java b/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java index 69887ea042e..1d783af7460 100644 --- a/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java +++ b/src/main/java/org/elasticsearch/index/engine/ShadowEngine.java @@ -28,8 +28,10 @@ import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.concurrent.ReleasableLock; import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; +import org.elasticsearch.index.shard.IndexShardException; import java.io.IOException; import java.util.Arrays; @@ -55,6 +57,10 @@ import java.util.List; */ public class ShadowEngine extends Engine { + /** how long to wait for an index to exist */ + public final static String NONEXISTENT_INDEX_RETRY_WAIT = "index.shadow.wait_for_initial_commit"; + public final static TimeValue DEFAULT_NONEXISTENT_INDEX_RETRY_WAIT = TimeValue.timeValueSeconds(5); + private volatile SearcherManager searcherManager; private volatile SegmentInfos lastCommittedSegmentInfos; @@ -62,15 +68,24 @@ public class ShadowEngine extends Engine { public ShadowEngine(EngineConfig engineConfig) { super(engineConfig); SearcherFactory searcherFactory = new EngineSearcherFactory(engineConfig); + final long nonexistentRetryTime = engineConfig.getIndexSettings() + .getAsTime(NONEXISTENT_INDEX_RETRY_WAIT, DEFAULT_NONEXISTENT_INDEX_RETRY_WAIT) + .getMillis(); try { DirectoryReader reader = null; store.incRef(); boolean success = false; try { - reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(store.directory()), shardId); - this.searcherManager = new SearcherManager(reader, searcherFactory); - this.lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); - success = true; + if (Lucene.waitForIndex(store.directory(), nonexistentRetryTime)) { + reader = ElasticsearchDirectoryReader.wrap(DirectoryReader.open(store.directory()), shardId); + this.searcherManager = new SearcherManager(reader, searcherFactory); + 
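// also keep the commit point's SegmentInfos around; segment stats are served from it +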
this.lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); + success = true; + } else { + throw new IndexShardException(shardId, "failed to open a shadow engine after " + + nonexistentRetryTime + "ms, " + + "directory is not an index"); + } } catch (Throwable e) { logger.warn("failed to create new reader", e); throw e; @@ -93,7 +108,7 @@ } @Override - public void index(Index index) throws EngineException { + public boolean index(Index index) throws EngineException { throw new UnsupportedOperationException(shardId + " index operation not allowed on shadow engine"); } @@ -102,6 +117,8 @@ throw new UnsupportedOperationException(shardId + " delete operation not allowed on shadow engine"); } + /** @deprecated This was removed, but we keep this API so translog can replay any DBQs on upgrade. */ + @Deprecated @Override public void delete(DeleteByQuery delete) throws EngineException { throw new UnsupportedOperationException(shardId + " delete-by-query operation not allowed on shadow engine"); @@ -203,7 +220,7 @@ } @Override - protected void closeNoLock(String reason) throws ElasticsearchException { + protected void closeNoLock(String reason) { if (isClosed.compareAndSet(false, true)) { try { logger.debug("shadow replica close searcher manager refCount: {}", store.refCount()); diff --git a/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java b/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java index 35c578ac4a7..19f3be065e8 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java +++ b/src/main/java/org/elasticsearch/index/fielddata/IndexFieldDataService.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.fielddata; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.collect.Tuple; @@ -230,7 +229,7 @@ public class IndexFieldDataService extends AbstractIndexComponent { final FieldMapper.Names fieldNames = mapper.names(); final FieldDataType type = mapper.fieldDataType(); if (type == null) { - throw new ElasticsearchIllegalArgumentException("found no fielddata type for field [" + fieldNames.fullName() + "]"); + throw new IllegalArgumentException("found no fielddata type for field [" + fieldNames.fullName() + "]"); } final boolean docValues = mapper.hasDocValues(); final String key = fieldNames.indexName(); @@ -259,7 +258,7 @@ public class IndexFieldDataService extends AbstractIndexComponent { builder = buildersByType.get(type.getType()); } if (builder == null) { - throw new ElasticsearchIllegalArgumentException("failed to find field data builder for field " + fieldNames.fullName() + ", and type " + type.getType()); + throw new IllegalArgumentException("failed to find field data builder for field " + fieldNames.fullName() + ", and type " + type.getType()); } IndexFieldDataCache cache = fieldDataCaches.get(fieldNames.indexName()); @@ -272,7 +271,7 @@ public class IndexFieldDataService extends AbstractIndexComponent { } else if ("none".equals(cacheType)){ cache = new IndexFieldDataCache.None(); } else { - throw new 
IllegalArgumentException("cache type not supported [" + cacheType + "] for field [" + fieldNames.fullName() + "]"); } fieldDataCaches.put(fieldNames.indexName(), cache); } diff --git a/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java index cd20656fa51..44d90d9dd13 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/IndexNumericFieldData.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.index.fielddata.ordinals.OrdinalsBuilder; import org.elasticsearch.index.mapper.core.BooleanFieldMapper; @@ -41,7 +40,7 @@ public interface IndexNumericFieldData extends IndexFieldData { @@ -61,8 +60,8 @@ public final class DisabledIndexFieldData extends AbstractIndexFieldData filter = fdSettings.getGroups("filter"); if (filter != null && !filter.isEmpty()) { - throw new ElasticsearchIllegalArgumentException("Doc values field data doesn't support filters [" + fieldNames.name() + "]"); + throw new IllegalArgumentException("Doc values field data doesn't support filters [" + fieldNames.name() + "]"); } if (BINARY_INDEX_FIELD_NAMES.contains(fieldNames.indexName())) { diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVIndexFieldData.java index d308f98e3f9..97b2ea1bae0 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVIndexFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/plain/GeoPointBinaryDVIndexFieldData.java @@ -21,8 +21,6 @@ package org.elasticsearch.index.fielddata.plain; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.DocValues; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.Index; @@ -44,7 +42,7 @@ public class GeoPointBinaryDVIndexFieldData extends DocValuesIndexFieldData impl @Override public final XFieldComparatorSource comparatorSource(@Nullable Object missingValue, MultiValueMode sortMode, Nested nested) { - throw new ElasticsearchIllegalArgumentException("can't sort on geo_point field without using specific sorting feature, like geo_distance"); + throw new IllegalArgumentException("can't sort on geo_point field without using specific sorting feature, like geo_distance"); } @Override @@ -52,7 +50,7 @@ public class GeoPointBinaryDVIndexFieldData extends DocValuesIndexFieldData impl try { return new GeoPointBinaryDVAtomicFieldData(DocValues.getBinary(context.reader(), fieldNames.indexName())); } catch (IOException e) { - throw new ElasticsearchIllegalStateException("Cannot load doc values", e); + throw new IllegalStateException("Cannot load doc values", e); } } diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVIndexFieldData.java index 3aad56e27cd..710ddba04a4 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVIndexFieldData.java +++ 
b/src/main/java/org/elasticsearch/index/fielddata/plain/NumericDVIndexFieldData.java @@ -26,7 +26,6 @@ import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.Bits; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.index.Index; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested; @@ -57,7 +56,7 @@ public class NumericDVIndexFieldData extends DocValuesIndexFieldData implements final Bits docsWithField = DocValues.getDocsWithField(reader, field); return DocValues.singleton(values, docsWithField); } catch (IOException e) { - throw new ElasticsearchIllegalStateException("Cannot load doc values", e); + throw new IllegalStateException("Cannot load doc values", e); } } diff --git a/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java b/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java index 98cc061f05a..6b4fea41894 100644 --- a/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java +++ b/src/main/java/org/elasticsearch/index/fielddata/plain/ParentChildIndexFieldData.java @@ -39,7 +39,6 @@ import org.apache.lucene.util.PagedBytes; import org.apache.lucene.util.packed.PackedInts; import org.apache.lucene.util.packed.PackedLongValues; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -427,7 +426,7 @@ public class ParentChildIndexFieldData extends AbstractIndexFieldData closeables = new ArrayList<>(); for (OrdinalMapAndAtomicFieldData fds : atomicFD.values()) { closeables.addAll(Arrays.asList(fds.fieldData)); @@ -505,7 +504,7 @@ public class ParentChildIndexFieldData extends AbstractIndexFieldData error = new AtomicReference<>(); - mappingUpdatedAction.updateMappingOnMaster(indexService.index().name(), indexService.indexUUID(), type, update, new MappingUpdatedAction.MappingUpdateListener() { + mappingUpdatedAction.updateMappingOnMaster(indexService.index().name(), type, update, waitForMappingUpdatePostRecovery, new MappingUpdatedAction.MappingUpdateListener() { @Override public void onMappingUpdate() { latch.countDown(); diff --git a/src/main/java/org/elasticsearch/index/get/ShardGetService.java b/src/main/java/org/elasticsearch/index/get/ShardGetService.java index a52962b1b79..eb8da964983 100644 --- a/src/main/java/org/elasticsearch/index/get/ShardGetService.java +++ b/src/main/java/org/elasticsearch/index/get/ShardGetService.java @@ -24,7 +24,6 @@ import com.google.common.collect.Sets; import org.apache.lucene.index.Term; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -99,7 +98,7 @@ public class ShardGetService extends AbstractIndexShardComponent { } public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext, boolean ignoreErrorsOnGeneratedFields) - throws ElasticsearchException { + { currentMetric.inc(); try { long now = 
System.nanoTime(); @@ -167,7 +166,7 @@ public class ShardGetService extends AbstractIndexShardComponent { return FetchSourceContext.DO_NOT_FETCH_SOURCE; } - public GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext, boolean ignoreErrorsOnGeneratedFields) throws ElasticsearchException { + public GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext, boolean ignoreErrorsOnGeneratedFields) { fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, gFields); boolean loadSource = (gFields != null && gFields.length > 0) || fetchSourceContext.fetchSource(); @@ -247,7 +246,7 @@ public class ShardGetService extends AbstractIndexShardComponent { if (fieldMapper == null) { if (docMapper.objectMappers().get(field) != null) { // Only fail if we know it is a object field, missing paths / fields shouldn't fail. - throw new ElasticsearchIllegalArgumentException("field [" + field + "] isn't a leaf field"); + throw new IllegalArgumentException("field [" + field + "] isn't a leaf field"); } } else if (shouldGetFromSource(ignoreErrorsOnGeneratedFields, docMapper, fieldMapper)) { List values = searchLookup.source().extractRawValues(field); @@ -367,7 +366,7 @@ public class ShardGetService extends AbstractIndexShardComponent { if (fieldMapper == null) { if (docMapper.objectMappers().get(field) != null) { // Only fail if we know it is a object field, missing paths / fields shouldn't fail. - throw new ElasticsearchIllegalArgumentException("field [" + field + "] isn't a leaf field"); + throw new IllegalArgumentException("field [" + field + "] isn't a leaf field"); } } else if (!fieldMapper.fieldType().stored() && !fieldMapper.isGenerated()) { if (searchLookup == null) { diff --git a/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java b/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java index 29bfe5866b5..8b95e0f132b 100644 --- a/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java +++ b/src/main/java/org/elasticsearch/index/indexing/IndexingOperationListener.java @@ -96,12 +96,4 @@ public abstract class IndexingOperationListener { public void postDelete(Engine.Delete delete) { } - - public Engine.DeleteByQuery preDeleteByQuery(Engine.DeleteByQuery deleteByQuery) { - return deleteByQuery; - } - - public void postDeleteByQuery(Engine.DeleteByQuery deleteByQuery) { - - } } diff --git a/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java b/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java index a84010ea366..1207101d6ca 100644 --- a/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java +++ b/src/main/java/org/elasticsearch/index/indexing/ShardIndexingService.java @@ -216,19 +216,6 @@ public class ShardIndexingService extends AbstractIndexShardComponent { typeStats(delete.type()).deleteCurrent.dec(); } - public Engine.DeleteByQuery preDeleteByQuery(Engine.DeleteByQuery deleteByQuery) { - for (IndexingOperationListener listener : listeners) { - deleteByQuery = listener.preDeleteByQuery(deleteByQuery); - } - return deleteByQuery; - } - - public void postDeleteByQuery(Engine.DeleteByQuery deleteByQuery) { - for (IndexingOperationListener listener : listeners) { - listener.postDeleteByQuery(deleteByQuery); - } - } - public void noopUpdate(String type) { 
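// a no-op update: nothing was written, only the stats below are bumped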
totalStats.noopUpdates.inc(); typeStats(type).noopUpdates.inc(); diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java b/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java index 9bfdce33a61..aebc4bcb5fc 100644 --- a/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java +++ b/src/main/java/org/elasticsearch/index/mapper/DocumentFieldMappers.java @@ -55,7 +55,7 @@ public final class DocumentFieldMappers implements Iterable> { this.searchQuoteAnalyzer = searchQuoteAnalyzer; } - public DocumentFieldMappers copyAndAllAll(Collection> newMappers) { + public DocumentFieldMappers copyAndAllAll(Collection> newMappers) { FieldMappersLookup fieldMappers = this.fieldMappers.copyAndAddAll(newMappers); FieldNameAnalyzer indexAnalyzer = this.indexAnalyzer.copyAndAddAll(Collections2.transform(newMappers, new Function, Map.Entry>() { @Override diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java index 11d91411e73..fa459d36b13 100644 --- a/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/DocumentMapper.java @@ -19,21 +19,17 @@ package org.elasticsearch.index.mapper; +import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; -import com.google.common.collect.Sets; import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexOptions; -import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.DocIdSet; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; -import org.apache.lucene.util.BitDocIdSet; -import org.apache.lucene.util.CloseableThreadLocal; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.MapBuilder; @@ -46,10 +42,7 @@ import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.cache.bitset.BitsetFilterCache; import org.elasticsearch.index.mapper.Mapping.SourceTransform; import org.elasticsearch.index.mapper.internal.AllFieldMapper; import org.elasticsearch.index.mapper.internal.FieldNamesFieldMapper; @@ -67,19 +60,19 @@ import org.elasticsearch.index.mapper.internal.VersionFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.mapper.object.RootObjectMapper; import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.ScriptService.ScriptType; +import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; 
import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; /** @@ -87,56 +80,6 @@ import java.util.concurrent.CopyOnWriteArrayList; */ public class DocumentMapper implements ToXContent { - /** - * A result of a merge. - */ - public static class MergeResult { - - private final String[] conflicts; - - public MergeResult(String[] conflicts) { - this.conflicts = conflicts; - } - - /** - * Does the merge have conflicts or not? - */ - public boolean hasConflicts() { - return conflicts.length > 0; - } - - /** - * The merge conflicts. - */ - public String[] conflicts() { - return this.conflicts; - } - } - - public static class MergeFlags { - - public static MergeFlags mergeFlags() { - return new MergeFlags(); - } - - private boolean simulate = true; - - public MergeFlags() { - } - - /** - * A simulation run, don't perform actual modifications to the mapping. - */ - public boolean simulate() { - return simulate; - } - - public MergeFlags simulate(boolean simulate) { - this.simulate = simulate; - return this; - } - } - /** * A listener to be called during the parse process. */ @@ -221,27 +164,15 @@ public class DocumentMapper implements ToXContent { } } - - private CloseableThreadLocal cache = new CloseableThreadLocal() { - @Override - protected ParseContext.InternalParseContext initialValue() { - return new ParseContext.InternalParseContext(index, indexSettings, docMapperParser, DocumentMapper.this, new ContentPath(0)); - } - }; - - private final String index; - - private final Settings indexSettings; - private final String type; private final StringAndBytesText typeText; - private final DocumentMapperParser docMapperParser; - private volatile CompressedString mappingSource; private final Mapping mapping; + private final DocumentParser documentParser; + private volatile DocumentFieldMappers fieldMappers; private volatile ImmutableMap objectMappers = ImmutableMap.of(); @@ -260,16 +191,14 @@ public class DocumentMapper implements ToXContent { RootObjectMapper rootObjectMapper, ImmutableMap meta, Map, RootMapper> rootMappers, List sourceTransforms) { - this.index = index; - this.indexSettings = indexSettings; this.type = rootObjectMapper.name(); this.typeText = new StringAndBytesText(this.type); - this.docMapperParser = docMapperParser; this.mapping = new Mapping( rootObjectMapper, rootMappers.values().toArray(new RootMapper[rootMappers.values().size()]), sourceTransforms.toArray(new SourceTransform[sourceTransforms.size()]), meta); + this.documentParser = new DocumentParser(index, indexSettings, docMapperParser, this); this.typeFilter = typeMapper().termFilter(type, null); @@ -413,123 +342,41 @@ public class DocumentMapper implements ToXContent { } public ParsedDocument parse(SourceToParse source) throws MapperParsingException { - return parse(source, null); + return documentParser.parseDocument(source, null); } + // NOTE: do not use this method, it will be removed in the future once + // https://github.com/elastic/elasticsearch/issues/10736 is done (MLT api is the only user of this listener) public ParsedDocument parse(SourceToParse source, @Nullable ParseListener listener) throws MapperParsingException { - ParseContext.InternalParseContext context = cache.get(); - - if (source.type() != null && !source.type().equals(this.type)) { - throw new MapperParsingException("Type mismatch, provide type [" + source.type() + "] but mapper is of type [" + this.type + "]"); - } - source.type(this.type); - - XContentParser 
parser = source.parser(); - try { - if (parser == null) { - parser = XContentHelper.createParser(source.source()); - } - if (mapping.sourceTransforms.length > 0) { - parser = transform(parser); - } - context.reset(parser, new ParseContext.Document(), source, listener); - - // will result in START_OBJECT - int countDownTokens = 0; - XContentParser.Token token = parser.nextToken(); - if (token != XContentParser.Token.START_OBJECT) { - throw new MapperParsingException("Malformed content, must start with an object"); - } - boolean emptyDoc = false; - token = parser.nextToken(); - if (token == XContentParser.Token.END_OBJECT) { - // empty doc, we can handle it... - emptyDoc = true; - } else if (token != XContentParser.Token.FIELD_NAME) { - throw new MapperParsingException("Malformed content, after first object, either the type field or the actual properties should exist"); - } - - for (RootMapper rootMapper : mapping.rootMappers) { - rootMapper.preParse(context); - } - - if (!emptyDoc) { - Mapper update = mapping.root.parse(context); - if (update != null) { - context.addDynamicMappingsUpdate((RootObjectMapper) update); - } - } - - for (int i = 0; i < countDownTokens; i++) { - parser.nextToken(); - } - - for (RootMapper rootMapper : mapping.rootMappers) { - rootMapper.postParse(context); - } - } catch (Throwable e) { - // if its already a mapper parsing exception, no need to wrap it... - if (e instanceof MapperParsingException) { - throw (MapperParsingException) e; - } - - // Throw a more meaningful message if the document is empty. - if (source.source() != null && source.source().length() == 0) { - throw new MapperParsingException("failed to parse, document is empty"); - } - - throw new MapperParsingException("failed to parse", e); - } finally { - // only close the parser when its not provided externally - if (source.parser() == null && parser != null) { - parser.close(); - } - } - // reverse the order of docs for nested docs support, parent should be last - if (context.docs().size() > 1) { - Collections.reverse(context.docs()); - } - // apply doc boost - if (context.docBoost() != 1.0f) { - Set encounteredFields = Sets.newHashSet(); - for (ParseContext.Document doc : context.docs()) { - encounteredFields.clear(); - for (IndexableField field : doc) { - if (field.fieldType().indexOptions() != IndexOptions.NONE && !field.fieldType().omitNorms()) { - if (!encounteredFields.contains(field.name())) { - ((Field) field).setBoost(context.docBoost() * field.boost()); - encounteredFields.add(field.name()); - } - } - } - } - } - - Mapper rootDynamicUpdate = context.dynamicMappingsUpdate(); - Mapping update = null; - if (rootDynamicUpdate != null) { - update = mapping.mappingUpdate(rootDynamicUpdate); - } - - ParsedDocument doc = new ParsedDocument(context.uid(), context.version(), context.id(), context.type(), source.routing(), source.timestamp(), source.ttl(), context.docs(), - context.source(), update).parent(source.parent()); - // reset the context to free up memory - context.reset(null, null, null, null); - return doc; + return documentParser.parseDocument(source, listener); } /** * Returns the best nested {@link ObjectMapper} instances that is in the scope of the specified nested docId. 
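* (the doc id is relative to the provided leaf reader context)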
*/ - public ObjectMapper findNestedObjectMapper(int nestedDocId, BitsetFilterCache cache, LeafReaderContext context) throws IOException { + public ObjectMapper findNestedObjectMapper(int nestedDocId, SearchContext sc, LeafReaderContext context) throws IOException { ObjectMapper nestedObjectMapper = null; for (ObjectMapper objectMapper : objectMappers().values()) { if (!objectMapper.nested().isNested()) { continue; } - BitDocIdSet nestedTypeBitSet = cache.getBitDocIdSetFilter(objectMapper.nestedTypeFilter()).getDocIdSet(context); - if (nestedTypeBitSet != null && nestedTypeBitSet.bits().get(nestedDocId)) { + Filter filter = objectMapper.nestedTypeFilter(); + if (filter == null) { + continue; + } + // We can pass down 'null' as acceptedDocs, because nestedDocId is a doc to be fetched and + // therefore is guaranteed to be a live doc. + DocIdSet nestedTypeSet = filter.getDocIdSet(context, null); + if (nestedTypeSet == null) { + continue; + } + DocIdSetIterator iterator = nestedTypeSet.iterator(); + if (iterator == null) { + continue; + } + + if (iterator.advance(nestedDocId) == nestedDocId) { if (nestedObjectMapper == null) { nestedObjectMapper = objectMapper; } else { @@ -563,22 +410,10 @@ public class DocumentMapper implements ToXContent { * @return transformed version of sourceAsMap. This may actually be the same object as sourceAsMap */ public Map<String, Object> transformSourceAsMap(Map<String, Object> sourceAsMap) { - if (mapping.sourceTransforms.length == 0) { - return sourceAsMap; - } - for (SourceTransform transform : mapping.sourceTransforms) { - sourceAsMap = transform.transformSourceAsMap(sourceAsMap); - } - return sourceAsMap; + return DocumentParser.transformSourceAsMap(mapping, sourceAsMap); } - private XContentParser transform(XContentParser parser) throws IOException { - Map<String, Object> transformed = transformSourceAsMap(parser.mapOrderedAndClose()); - XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()).value(transformed); - return parser.contentType().xContent().createParser(builder.bytes()); - } - - public void addFieldMappers(List<FieldMapper<?>> fieldMappers) { + public void addFieldMappers(Collection<FieldMapper<?>> fieldMappers) { synchronized (mappersMutex) { this.fieldMappers = this.fieldMappers.copyAndAllAll(fieldMappers); } @@ -628,20 +463,20 @@ public class DocumentMapper implements ToXContent { mapping.root.traverse(listener); } - private MergeContext newMergeContext(MergeFlags mergeFlags) { - return new MergeContext(mergeFlags) { + private MergeResult newMergeContext(boolean simulate) { + return new MergeResult(simulate) { List<String> conflicts = new ArrayList<>(); @Override - public void addFieldMappers(List<FieldMapper<?>> fieldMappers) { - assert mergeFlags().simulate() == false; + public void addFieldMappers(Collection<FieldMapper<?>> fieldMappers) { + assert simulate() == false; DocumentMapper.this.addFieldMappers(fieldMappers); } @Override public void addObjectMappers(Collection<ObjectMapper> objectMappers) { - assert mergeFlags().simulate() == false; + assert simulate() == false; DocumentMapper.this.addObjectMappers(objectMappers); } @@ -663,10 +498,10 @@ public class DocumentMapper implements ToXContent { }; } - public synchronized MergeResult merge(Mapping mapping, MergeFlags mergeFlags) { - final MergeContext mergeContext = newMergeContext(mergeFlags); - final MergeResult mergeResult = this.mapping.merge(mapping, mergeContext); - if (mergeFlags.simulate() == false) { + public synchronized MergeResult merge(Mapping mapping, boolean simulate) { + final MergeResult mergeResult = newMergeContext(simulate); + this.mapping.merge(mapping, mergeResult); + if 
(simulate == false) { refreshSource(); } return mergeResult; @@ -687,7 +522,7 @@ public class DocumentMapper implements ToXContent { } public void close() { - cache.close(); + documentParser.close(); mapping.root.close(); for (RootMapper rootMapper : mapping.rootMappers) { rootMapper.close(); @@ -734,7 +569,7 @@ public class DocumentMapper implements ToXContent { public Map transformSourceAsMap(Map sourceAsMap) { try { // We use the ctx variable and the _source name to be consistent with the update api. - ExecutableScript executable = scriptService.executable(language, script, scriptType, ScriptContext.Standard.MAPPING, parameters); + ExecutableScript executable = scriptService.executable(new Script(language, script, scriptType, parameters), ScriptContext.Standard.MAPPING); Map ctx = new HashMap<>(1); ctx.put("_source", sourceAsMap); executable.setNextVar("ctx", ctx); @@ -742,7 +577,7 @@ public class DocumentMapper implements ToXContent { ctx = (Map) executable.unwrap(ctx); return (Map) ctx.get("_source"); } catch (Exception e) { - throw new ElasticsearchIllegalArgumentException("failed to execute script", e); + throw new IllegalArgumentException("failed to execute script", e); } } diff --git a/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java new file mode 100644 index 00000000000..9520bb117f1 --- /dev/null +++ b/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -0,0 +1,700 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.mapper; + +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Sets; +import org.apache.lucene.document.Field; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.util.CloseableThreadLocal; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.joda.FormatDateTimeFormatter; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.mapper.internal.TypeFieldMapper; +import org.elasticsearch.index.mapper.internal.UidFieldMapper; +import org.elasticsearch.index.mapper.object.ArrayValueMapperParser; +import org.elasticsearch.index.mapper.object.ObjectMapper; +import org.elasticsearch.index.mapper.object.RootObjectMapper; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Collections; +import java.util.Map; +import java.util.Set; + +/** A parser for documents, given mappings from a DocumentMapper */ +class DocumentParser implements Closeable { + + private CloseableThreadLocal cache = new CloseableThreadLocal() { + @Override + protected ParseContext.InternalParseContext initialValue() { + return new ParseContext.InternalParseContext(index, indexSettings, docMapperParser, docMapper, new ContentPath(0)); + } + }; + + private String index; + private Settings indexSettings; + private DocumentMapperParser docMapperParser; + private DocumentMapper docMapper; + + public DocumentParser(String index, Settings indexSettings, DocumentMapperParser docMapperParser, DocumentMapper docMapper) { + this.index = index; + this.indexSettings = indexSettings; + this.docMapperParser = docMapperParser; + this.docMapper = docMapper; + } + + public ParsedDocument parseDocument(SourceToParse source, @Nullable DocumentMapper.ParseListener listener) throws MapperParsingException { + ParseContext.InternalParseContext context = cache.get(); + + final Mapping mapping = docMapper.mapping(); + if (source.type() != null && !source.type().equals(docMapper.type())) { + throw new MapperParsingException("Type mismatch, provide type [" + source.type() + "] but mapper is of type [" + docMapper.type() + "]"); + } + source.type(docMapper.type()); + + XContentParser parser = source.parser(); + try { + if (parser == null) { + parser = XContentHelper.createParser(source.source()); + } + if (mapping.sourceTransforms.length > 0) { + parser = transform(mapping, parser); + } + context.reset(parser, new ParseContext.Document(), source, listener); + + // will result in START_OBJECT + int countDownTokens = 0; + XContentParser.Token token = parser.nextToken(); + if (token != XContentParser.Token.START_OBJECT) { + throw new MapperParsingException("Malformed content, must start with an object"); + } + boolean emptyDoc = false; + token = parser.nextToken(); + if (token == XContentParser.Token.END_OBJECT) { + // empty doc, we can handle it... 
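+ // (the root mappers still run their pre- and post-parse hooks below, so meta fields are added)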
+ emptyDoc = true; + } else if (token != XContentParser.Token.FIELD_NAME) { + throw new MapperParsingException("Malformed content, after first object, either the type field or the actual properties should exist"); + } + + for (RootMapper rootMapper : mapping.rootMappers) { + rootMapper.preParse(context); + } + + if (!emptyDoc) { + Mapper update = parseObject(context, mapping.root); + if (update != null) { + context.addDynamicMappingsUpdate(update); + } + } + + for (int i = 0; i < countDownTokens; i++) { + parser.nextToken(); + } + + for (RootMapper rootMapper : mapping.rootMappers) { + rootMapper.postParse(context); + } + } catch (Throwable e) { + // if its already a mapper parsing exception, no need to wrap it... + if (e instanceof MapperParsingException) { + throw (MapperParsingException) e; + } + + // Throw a more meaningful message if the document is empty. + if (source.source() != null && source.source().length() == 0) { + throw new MapperParsingException("failed to parse, document is empty"); + } + + throw new MapperParsingException("failed to parse", e); + } finally { + // only close the parser when its not provided externally + if (source.parser() == null && parser != null) { + parser.close(); + } + } + // reverse the order of docs for nested docs support, parent should be last + if (context.docs().size() > 1) { + Collections.reverse(context.docs()); + } + // apply doc boost + if (context.docBoost() != 1.0f) { + Set encounteredFields = Sets.newHashSet(); + for (ParseContext.Document doc : context.docs()) { + encounteredFields.clear(); + for (IndexableField field : doc) { + if (field.fieldType().indexOptions() != IndexOptions.NONE && !field.fieldType().omitNorms()) { + if (!encounteredFields.contains(field.name())) { + ((Field) field).setBoost(context.docBoost() * field.boost()); + encounteredFields.add(field.name()); + } + } + } + } + } + + Mapper rootDynamicUpdate = context.dynamicMappingsUpdate(); + Mapping update = null; + if (rootDynamicUpdate != null) { + update = mapping.mappingUpdate(rootDynamicUpdate); + } + + ParsedDocument doc = new ParsedDocument(context.uid(), context.version(), context.id(), context.type(), source.routing(), source.timestamp(), source.ttl(), context.docs(), + context.source(), update).parent(source.parent()); + // reset the context to free up memory + context.reset(null, null, null, null); + return doc; + } + + static ObjectMapper parseObject(ParseContext context, ObjectMapper mapper) throws IOException { + if (mapper.isEnabled() == false) { + context.parser().skipChildren(); + return null; + } + XContentParser parser = context.parser(); + + String currentFieldName = parser.currentName(); + XContentParser.Token token = parser.currentToken(); + if (token == XContentParser.Token.VALUE_NULL) { + // the object is null ("obj1" : null), simply bail + return null; + } + + if (token.isValue()) { + throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName + "] as object, but found a concrete value"); + } + + ObjectMapper.Nested nested = mapper.nested(); + if (nested.isNested()) { + context = context.createNestedContext(mapper.fullPath()); + ParseContext.Document nestedDoc = context.doc(); + ParseContext.Document parentDoc = nestedDoc.getParent(); + // pre add the uid field if possible (id was already provided) + IndexableField uidField = parentDoc.getField(UidFieldMapper.NAME); + if (uidField != null) { + // we don't need to add it as a full uid field in nested docs, since we don't need versioning + 
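// (deleting the parent by its uid term also removes these nested docs, since they share it) +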
// we also rely on this for UidField#loadVersion + + // this is a deeply nested field + nestedDoc.add(new Field(UidFieldMapper.NAME, uidField.stringValue(), UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); + } + // the type of the nested doc starts with __, so we can identify that its a nested one in filters + // note, we don't prefix it with the type of the doc since it allows us to execute a nested query + // across types (for example, with similar nested objects) + nestedDoc.add(new Field(TypeFieldMapper.NAME, mapper.nestedTypePathAsString(), TypeFieldMapper.Defaults.FIELD_TYPE)); + } + + ContentPath.Type origPathType = context.path().pathType(); + context.path().pathType(mapper.pathType()); + + // if we are at the end of the previous object, advance + if (token == XContentParser.Token.END_OBJECT) { + token = parser.nextToken(); + } + if (token == XContentParser.Token.START_OBJECT) { + // if we are just starting an OBJECT, advance, this is the object we are parsing, we need the name first + token = parser.nextToken(); + } + + ObjectMapper update = null; + while (token != XContentParser.Token.END_OBJECT) { + ObjectMapper newUpdate = null; + if (token == XContentParser.Token.START_OBJECT) { + newUpdate = parseObject(context, mapper, currentFieldName); + } else if (token == XContentParser.Token.START_ARRAY) { + newUpdate = parseArray(context, mapper, currentFieldName); + } else if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_NULL) { + parseNullValue(context, mapper, currentFieldName); + } else if (token == null) { + throw new MapperParsingException("object mapping for [" + mapper.name() + "] tried to parse field [" + currentFieldName + "] as object, but got EOF, has a concrete value been provided to it?"); + } else if (token.isValue()) { + newUpdate = parseValue(context, mapper, currentFieldName, token); + } + token = parser.nextToken(); + if (newUpdate != null) { + if (update == null) { + update = newUpdate; + } else { + MapperUtils.merge(update, newUpdate); + } + } + } + // restore the enable path flag + context.path().pathType(origPathType); + if (nested.isNested()) { + ParseContext.Document nestedDoc = context.doc(); + ParseContext.Document parentDoc = nestedDoc.getParent(); + if (nested.isIncludeInParent()) { + for (IndexableField field : nestedDoc.getFields()) { + if (field.name().equals(UidFieldMapper.NAME) || field.name().equals(TypeFieldMapper.NAME)) { + continue; + } else { + parentDoc.add(field); + } + } + } + if (nested.isIncludeInRoot()) { + ParseContext.Document rootDoc = context.rootDoc(); + // don't add it twice, if its included in parent, and we are handling the master doc... 
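+ // (if the parent doc is the root doc, include_in_parent already added the fields above)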
+ if (!nested.isIncludeInParent() || parentDoc != rootDoc) { + for (IndexableField field : nestedDoc.getFields()) { + if (field.name().equals(UidFieldMapper.NAME) || field.name().equals(TypeFieldMapper.NAME)) { + continue; + } else { + rootDoc.add(field); + } + } + } + } + } + return update; + } + + private static Mapper parseObjectOrField(ParseContext context, Mapper mapper) throws IOException { + if (mapper instanceof ObjectMapper) { + return parseObject(context, (ObjectMapper) mapper); + } else { + FieldMapper fieldMapper = (FieldMapper)mapper; + Mapper update = fieldMapper.parse(context); + if (fieldMapper.copyTo() != null) { + parseCopyFields(context, fieldMapper, fieldMapper.copyTo().copyToFields()); + } + return update; + } + } + + private static ObjectMapper parseObject(final ParseContext context, ObjectMapper mapper, String currentFieldName) throws IOException { + if (currentFieldName == null) { + throw new MapperParsingException("object mapping [" + mapper.name() + "] trying to serialize an object with no field associated with it, current value [" + context.parser().textOrNull() + "]"); + } + context.path().add(currentFieldName); + + ObjectMapper update = null; + Mapper objectMapper = mapper.getMapper(currentFieldName); + if (objectMapper != null) { + final Mapper subUpdate = parseObjectOrField(context, objectMapper); + if (subUpdate != null) { + // propagate mapping update + update = mapper.mappingUpdate(subUpdate); + } + } else { + ObjectMapper.Dynamic dynamic = mapper.dynamic(); + if (dynamic == null) { + dynamic = dynamicOrDefault(context.root().dynamic()); + } + if (dynamic == ObjectMapper.Dynamic.STRICT) { + throw new StrictDynamicMappingException(mapper.fullPath(), currentFieldName); + } else if (dynamic == ObjectMapper.Dynamic.TRUE) { + // remove the current field name from path, since template search and the object builder add it as well... + context.path().remove(); + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "object"); + if (builder == null) { + builder = MapperBuilders.object(currentFieldName).enabled(true).pathType(mapper.pathType()); + // if this is a non root object, then explicitly set the dynamic behavior if set + if (!(mapper instanceof RootObjectMapper) && mapper.dynamic() != ObjectMapper.Defaults.DYNAMIC) { + ((ObjectMapper.Builder) builder).dynamic(mapper.dynamic()); + } + } + Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); + objectMapper = builder.build(builderContext); + context.path().add(currentFieldName); + update = mapper.mappingUpdate(parseAndMergeUpdate(objectMapper, context)); + } else { + // not dynamic, read everything up to end object + context.parser().skipChildren(); + } + } + + context.path().remove(); + return update; + } + + private static ObjectMapper parseArray(ParseContext context, ObjectMapper parentMapper, String lastFieldName) throws IOException { + String arrayFieldName = lastFieldName; + Mapper mapper = parentMapper.getMapper(lastFieldName); + if (mapper != null) { + // There is a concrete mapper for this field already. 
Need to check if the mapper + // expects an array, if so we pass the context straight to the mapper and if not + // we serialize the array components + if (mapper instanceof ArrayValueMapperParser) { + final Mapper subUpdate = parseObjectOrField(context, mapper); + if (subUpdate != null) { + // propagate the mapping update + return parentMapper.mappingUpdate(subUpdate); + } else { + return null; + } + } else { + return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + } + } else { + + ObjectMapper.Dynamic dynamic = parentMapper.dynamic(); + if (dynamic == null) { + dynamic = dynamicOrDefault(context.root().dynamic()); + } + if (dynamic == ObjectMapper.Dynamic.STRICT) { + throw new StrictDynamicMappingException(parentMapper.fullPath(), arrayFieldName); + } else if (dynamic == ObjectMapper.Dynamic.TRUE) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, arrayFieldName, "object"); + if (builder == null) { + return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + } + Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); + mapper = builder.build(builderContext); + if (mapper != null && mapper instanceof ArrayValueMapperParser) { + context.path().add(arrayFieldName); + mapper = parseAndMergeUpdate(mapper, context); + return parentMapper.mappingUpdate(mapper); + } else { + return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + } + } else { + return parseNonDynamicArray(context, parentMapper, lastFieldName, arrayFieldName); + } + } + } + + private static ObjectMapper parseNonDynamicArray(ParseContext context, ObjectMapper mapper, String lastFieldName, String arrayFieldName) throws IOException { + XContentParser parser = context.parser(); + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + if (token == XContentParser.Token.START_OBJECT) { + return parseObject(context, mapper, lastFieldName); + } else if (token == XContentParser.Token.START_ARRAY) { + return parseArray(context, mapper, lastFieldName); + } else if (token == XContentParser.Token.FIELD_NAME) { + lastFieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_NULL) { + parseNullValue(context, mapper, lastFieldName); + } else if (token == null) { + throw new MapperParsingException("object mapping for [" + mapper.name() + "] with array for [" + arrayFieldName + "] tried to parse as array, but got EOF, is there a mismatch in types for the same field?"); + } else { + return parseValue(context, mapper, lastFieldName, token); + } + } + return null; + } + + private static ObjectMapper parseValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException { + if (currentFieldName == null) { + throw new MapperParsingException("object mapping [" + parentMapper.name() + "] trying to serialize a value with no field associated with it, current value [" + context.parser().textOrNull() + "]"); + } + Mapper mapper = parentMapper.getMapper(currentFieldName); + if (mapper != null) { + Mapper subUpdate = parseObjectOrField(context, mapper); + if (subUpdate == null) { + return null; + } + return parentMapper.mappingUpdate(subUpdate); + } else { + return parseDynamicValue(context, parentMapper, currentFieldName, token); + } + } + + private static void parseNullValue(ParseContext context, ObjectMapper parentMapper, String lastFieldName) throws IOException { 
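+ // note: a null value never introduces a dynamic mapping, there is no value to infer a type from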
+ // we can only handle null values if we have mappings for them + Mapper mapper = parentMapper.getMapper(lastFieldName); + if (mapper != null) { + if (mapper instanceof FieldMapper) { + if (!((FieldMapper) mapper).supportsNullValue()) { + throw new MapperParsingException("no object mapping found for null value in [" + lastFieldName + "]"); + } + } + parseObjectOrField(context, mapper); + } else if (parentMapper.dynamic() == ObjectMapper.Dynamic.STRICT) { + throw new StrictDynamicMappingException(parentMapper.fullPath(), lastFieldName); + } + } + + private static ObjectMapper parseDynamicValue(final ParseContext context, ObjectMapper parentMapper, String currentFieldName, XContentParser.Token token) throws IOException { + ObjectMapper.Dynamic dynamic = parentMapper.dynamic(); + if (dynamic == null) { + dynamic = dynamicOrDefault(context.root().dynamic()); + } + if (dynamic == ObjectMapper.Dynamic.STRICT) { + throw new StrictDynamicMappingException(parentMapper.fullPath(), currentFieldName); + } + if (dynamic == ObjectMapper.Dynamic.FALSE) { + return null; + } + Mapper mapper = null; + Mapper.BuilderContext builderContext = new Mapper.BuilderContext(context.indexSettings(), context.path()); + if (token == XContentParser.Token.VALUE_STRING) { + boolean resolved = false; + + // do a quick test to see if it fits a dynamic template, if so, use it. + // we need to do it here so we can handle things like attachment templates, where calling + // text (to see if it's a date) causes the binary value to be cleared + if (!resolved) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string", null); + if (builder != null) { + mapper = builder.build(builderContext); + resolved = true; + } + } + + if (!resolved && context.root().dateDetection()) { + String text = context.parser().text(); + // a safe check since "1" gets parsed as well + if (Strings.countOccurrencesOf(text, ":") > 1 || Strings.countOccurrencesOf(text, "-") > 1 || Strings.countOccurrencesOf(text, "/") > 1) { + for (FormatDateTimeFormatter dateTimeFormatter : context.root().dynamicDateTimeFormatters()) { + try { + dateTimeFormatter.parser().parseMillis(text); + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "date"); + if (builder == null) { + builder = MapperBuilders.dateField(currentFieldName).dateTimeFormatter(dateTimeFormatter); + } + mapper = builder.build(builderContext); + resolved = true; + break; + } catch (Exception e) { + // failure to parse this, continue + } + } + } + } + if (!resolved && context.root().numericDetection()) { + String text = context.parser().text(); + try { + Long.parseLong(text); + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long"); + if (builder == null) { + builder = MapperBuilders.longField(currentFieldName); + } + mapper = builder.build(builderContext); + resolved = true; + } catch (Exception e) { + // not a long number + } + if (!resolved) { + try { + Double.parseDouble(text); + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double"); + if (builder == null) { + builder = MapperBuilders.doubleField(currentFieldName); + } + mapper = builder.build(builderContext); + resolved = true; + } catch (Exception e) { + // not a double number either + } + } + } + if (!resolved) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string"); + if (builder == null) { + builder = MapperBuilders.stringField(currentFieldName); + } + 
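// fall back to a plain string field when no template or detection matched +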
mapper = builder.build(builderContext); + } + } else if (token == XContentParser.Token.VALUE_NUMBER) { + XContentParser.NumberType numberType = context.parser().numberType(); + if (numberType == XContentParser.NumberType.INT) { + if (context.parser().estimatedNumberType()) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long"); + if (builder == null) { + builder = MapperBuilders.longField(currentFieldName); + } + mapper = builder.build(builderContext); + } else { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "integer"); + if (builder == null) { + builder = MapperBuilders.integerField(currentFieldName); + } + mapper = builder.build(builderContext); + } + } else if (numberType == XContentParser.NumberType.LONG) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long"); + if (builder == null) { + builder = MapperBuilders.longField(currentFieldName); + } + mapper = builder.build(builderContext); + } else if (numberType == XContentParser.NumberType.FLOAT) { + if (context.parser().estimatedNumberType()) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double"); + if (builder == null) { + builder = MapperBuilders.doubleField(currentFieldName); + } + mapper = builder.build(builderContext); + } else { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "float"); + if (builder == null) { + builder = MapperBuilders.floatField(currentFieldName); + } + mapper = builder.build(builderContext); + } + } else if (numberType == XContentParser.NumberType.DOUBLE) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double"); + if (builder == null) { + builder = MapperBuilders.doubleField(currentFieldName); + } + mapper = builder.build(builderContext); + } + } else if (token == XContentParser.Token.VALUE_BOOLEAN) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "boolean"); + if (builder == null) { + builder = MapperBuilders.booleanField(currentFieldName); + } + mapper = builder.build(builderContext); + } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "binary"); + if (builder == null) { + builder = MapperBuilders.binaryField(currentFieldName); + } + mapper = builder.build(builderContext); + } else { + Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, null); + if (builder != null) { + mapper = builder.build(builderContext); + } else { + // TODO how do we identify dynamically that its a binary value? 
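+ // (binary values only arrive as VALUE_EMBEDDED_OBJECT, which is already handled above)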
+ throw new IllegalStateException("Can't handle serializing a dynamic type with content token [" + token + "] and field name [" + currentFieldName + "]"); + } + } + + mapper = parseAndMergeUpdate(mapper, context); + + ObjectMapper update = null; + if (mapper != null) { + update = parentMapper.mappingUpdate(mapper); + } + return update; + } + + /** Creates instances of the fields that the current field should be copied to */ + private static void parseCopyFields(ParseContext context, FieldMapper fieldMapper, ImmutableList<String> copyToFields) throws IOException { + if (!context.isWithinCopyTo() && copyToFields.isEmpty() == false) { + context = context.createCopyToContext(); + for (String field : copyToFields) { + // In case of a hierarchy of nested documents, we need to figure out + // which document the field should go to + ParseContext.Document targetDoc = null; + for (ParseContext.Document doc = context.doc(); doc != null; doc = doc.getParent()) { + if (field.startsWith(doc.getPrefix())) { + targetDoc = doc; + break; + } + } + assert targetDoc != null; + final ParseContext copyToContext; + if (targetDoc == context.doc()) { + copyToContext = context; + } else { + copyToContext = context.switchDoc(targetDoc); + } + parseCopy(field, copyToContext); + } + } + } + + /** Creates a copy of the current field with the given field name */ + private static void parseCopy(String field, ParseContext context) throws IOException { + // TODO: this should not be indexName... + FieldMappers mappers = context.docMapper().mappers().indexName(field); + if (mappers != null && !mappers.isEmpty()) { + mappers.mapper().parse(context); + } else { + // The path of the dest field might be completely different from the current one so we need to reset it + context = context.overridePath(new ContentPath(0)); + + ObjectMapper mapper = context.root(); + String objectPath = ""; + String fieldPath = field; + int posDot = field.lastIndexOf('.'); + if (posDot > 0) { + objectPath = field.substring(0, posDot); + context.path().add(objectPath); + mapper = context.docMapper().objectMappers().get(objectPath); + fieldPath = field.substring(posDot + 1); + } + if (mapper == null) { + //TODO: Create an object dynamically? + throw new MapperParsingException("attempt to copy value to non-existing object [" + field + "]"); + } + ObjectMapper update = parseDynamicValue(context, mapper, fieldPath, context.parser().currentToken()); + assert update != null; // we are parsing a dynamic value so we necessarily created a new mapping + + // propagate the update to the root + while (objectPath.length() > 0) { + String parentPath = ""; + ObjectMapper parent = context.root(); + posDot = objectPath.lastIndexOf('.'); + if (posDot > 0) { + parentPath = objectPath.substring(0, posDot); + parent = context.docMapper().objectMappers().get(parentPath); + } + if (parent == null) { + throw new IllegalStateException("[" + objectPath + "] has no parent for path [" + parentPath + "]"); + } + update = parent.mappingUpdate(update); + objectPath = parentPath; + } + context.addDynamicMappingsUpdate(update); + } + } + + /** + * Parse the given {@code context} with the given {@code mapper} and apply + * the potential mapping update in-place. This method is useful when + * composing mapping updates. 
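+ * For example, a freshly created dynamic object mapper first absorbs the updates of its sub-fields before the combined update is propagated to the root.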
+     */
+    private static <M extends Mapper> M parseAndMergeUpdate(M mapper, ParseContext context) throws IOException {
+        final Mapper update = parseObjectOrField(context, mapper);
+        if (update != null) {
+            MapperUtils.merge(mapper, update);
+        }
+        return mapper;
+    }
+
+    private static XContentParser transform(Mapping mapping, XContentParser parser) throws IOException {
+        Map<String, Object> transformed = transformSourceAsMap(mapping, parser.mapOrderedAndClose());
+        XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType()).value(transformed);
+        return parser.contentType().xContent().createParser(builder.bytes());
+    }
+
+    private static ObjectMapper.Dynamic dynamicOrDefault(ObjectMapper.Dynamic dynamic) {
+        return dynamic == null ? ObjectMapper.Dynamic.TRUE : dynamic;
+    }
+
+    static Map<String, Object> transformSourceAsMap(Mapping mapping, Map<String, Object> sourceAsMap) {
+        if (mapping.sourceTransforms.length == 0) {
+            return sourceAsMap;
+        }
+        for (Mapping.SourceTransform transform : mapping.sourceTransforms) {
+            sourceAsMap = transform.transformSourceAsMap(sourceAsMap);
+        }
+        return sourceAsMap;
+    }
+
+    @Override
+    public void close() {
+        cache.close();
+    }
+}
diff --git a/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
index a17a2852757..2c19d0e326c 100644
--- a/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/FieldMapper.java
@@ -20,14 +20,15 @@ package org.elasticsearch.index.mapper;
 import com.google.common.base.Strings;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.MultiTermQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.index.fielddata.FieldDataType;
@@ -35,6 +36,7 @@ import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
 import org.elasticsearch.index.query.QueryParseContext;
 import org.elasticsearch.index.similarity.SimilarityProvider;
+import java.io.IOException;
 import java.util.List;
 /**
@@ -295,4 +297,16 @@
      *
      */
     public boolean isGenerated();
+
+    /**
+     * Parse using the provided {@link ParseContext} and return a mapping
+     * update if dynamic mappings modified the mappings, or {@code null} if
+     * mappings were not modified.
+     */
+    Mapper parse(ParseContext context) throws IOException;
+
+    /**
+     * @return a {@link FieldStats} instance that maps to the type of this field based on the provided {@link Terms} instance.
+     */
+    FieldStats stats(Terms terms, int maxDoc) throws IOException;
 }
diff --git a/src/main/java/org/elasticsearch/index/mapper/FieldMapperListener.java b/src/main/java/org/elasticsearch/index/mapper/FieldMapperListener.java
index 3b7da0eb8c6..3251ed5203f 100644
--- a/src/main/java/org/elasticsearch/index/mapper/FieldMapperListener.java
+++ b/src/main/java/org/elasticsearch/index/mapper/FieldMapperListener.java
@@ -20,6 +20,7 @@ package org.elasticsearch.index.mapper;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.List;
 /**
@@ -38,7 +39,7 @@ public abstract class FieldMapperListener {
     public abstract void fieldMapper(FieldMapper fieldMapper);
-    public void fieldMappers(List<FieldMapper<?>> fieldMappers) {
+    public void fieldMappers(Collection<FieldMapper<?>> fieldMappers) {
         for (FieldMapper mapper : fieldMappers) {
             fieldMapper(mapper);
         }
diff --git a/src/main/java/org/elasticsearch/index/mapper/FieldMappersLookup.java b/src/main/java/org/elasticsearch/index/mapper/FieldMappersLookup.java
index 2e72f0c6fff..60c1d143ead 100644
--- a/src/main/java/org/elasticsearch/index/mapper/FieldMappersLookup.java
+++ b/src/main/java/org/elasticsearch/index/mapper/FieldMappersLookup.java
@@ -49,7 +49,7 @@ class FieldMappersLookup implements Iterable<FieldMapper<?>> {
     /**
      * Return a new instance that contains the union of this instance and the provided mappers.
      */
-    public FieldMappersLookup copyAndAddAll(Collection> newMappers) {
+    public FieldMappersLookup copyAndAddAll(Collection> newMappers) {
         CopyOnWriteHashMap map = this.mappers;
         for (FieldMapper mapper : newMappers) {
diff --git a/src/main/java/org/elasticsearch/index/mapper/Mapper.java b/src/main/java/org/elasticsearch/index/mapper/Mapper.java
index ae2f6acbf2d..1821dfd4436 100644
--- a/src/main/java/org/elasticsearch/index/mapper/Mapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/Mapper.java
@@ -125,14 +125,7 @@ public interface Mapper extends ToXContent {
     String name();
-    /**
-     * Parse using the provided {@link ParseContext} and return a mapping
-     * update if dynamic mappings modified the mappings, or {@code null} if
-     * mappings were not modified.
-     */
-    Mapper parse(ParseContext context) throws IOException;
-
-    void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException;
+    void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException;
     void traverse(FieldMapperListener fieldMapperListener);
diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/src/main/java/org/elasticsearch/index/mapper/MapperService.java
index 4bad191f88e..6fe69c809ff 100755
--- a/src/main/java/org/elasticsearch/index/mapper/MapperService.java
+++ b/src/main/java/org/elasticsearch/index/mapper/MapperService.java
@@ -20,13 +20,11 @@ package org.elasticsearch.index.mapper;
 import com.carrotsearch.hppc.ObjectOpenHashSet;
-import com.google.common.base.Charsets;
 import com.google.common.base.Predicate;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.DelegatingAnalyzerWrapper;
 import org.apache.lucene.index.IndexOptions;
@@ -40,20 +38,14 @@ import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ElasticsearchGenerationException;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.compress.CompressedString;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.io.FileSystemUtils;
-import org.elasticsearch.common.io.PathUtils;
-import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.env.FailedToResolveConfigException;
 import org.elasticsearch.index.AbstractIndexComponent;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.analysis.AnalysisService;
@@ -61,7 +53,6 @@ import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.Mapper.BuilderContext;
 import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
 import org.elasticsearch.index.mapper.object.ObjectMapper;
-import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
 import org.elasticsearch.index.settings.IndexSettings;
 import org.elasticsearch.index.similarity.SimilarityLookupService;
 import org.elasticsearch.indices.InvalidTypeNameException;
@@ -70,9 +61,6 @@ import org.elasticsearch.percolator.PercolatorService;
 import org.elasticsearch.script.ScriptService;
 import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URL;
-import java.nio.file.Paths;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
@@ -82,7 +70,6 @@ import java.util.Map;
 import java.util.concurrent.CopyOnWriteArrayList;
 import static org.elasticsearch.common.collect.MapBuilder.newMapBuilder;
-import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags;
 /**
  *
@@ -127,7 +114,7 @@ public class MapperService extends AbstractIndexComponent {
     private volatile ImmutableMap<String, FieldMapper<?>> unmappedFieldMappers = ImmutableMap.of();
     @Inject
-    public MapperService(Index index, @IndexSettings Settings indexSettings, Environment environment, AnalysisService analysisService, IndexFieldDataService fieldDataService,
+    public MapperService(Index index, @IndexSettings Settings indexSettings, AnalysisService analysisService, IndexFieldDataService fieldDataService,
                          SimilarityLookupService similarityLookupService, ScriptService scriptService) {
         super(index, indexSettings);
@@ -139,107 +126,36 @@ public class MapperService extends AbstractIndexComponent {
         this.searchQuoteAnalyzer = new SmartIndexNameSearchQuoteAnalyzer(analysisService.defaultSearchQuoteAnalyzer());
         this.dynamic = indexSettings.getAsBoolean("index.mapper.dynamic", true);
-        String defaultMappingLocation = indexSettings.get("index.mapper.default_mapping_location");
-        final URL defaultMappingUrl;
+        defaultPercolatorMappingSource = "{\n" +
+            "\"_default_\":{\n" +
+            "\"properties\" : {\n" +
+            "\"query\" : {\n" +
+            "\"type\" : \"object\",\n" +
+            "\"enabled\" : false\n" +
+            "}\n" +
+            "}\n" +
+            "}\n" +
+            "}";
         if (index.getName().equals(ScriptService.SCRIPT_INDEX)){
-            defaultMappingUrl = getMappingUrl(indexSettings, environment, defaultMappingLocation, "script-mapping.json", "org/elasticsearch/index/mapper/script-mapping.json");
-        } else {
-            defaultMappingUrl = getMappingUrl(indexSettings, environment, defaultMappingLocation, "default-mapping.json", "org/elasticsearch/index/mapper/default-mapping.json");
-        }
-
-        if (defaultMappingUrl == null) {
-            logger.info("failed to find default-mapping.json in the classpath, using the default template");
-            if (index.getName().equals(ScriptService.SCRIPT_INDEX)){
-                defaultMappingSource = "{" +
-                    "\"_default_\": {" +
-                    "\"properties\": {" +
+            defaultMappingSource = "{" +
+                "\"_default_\": {" +
+                "\"properties\": {" +
                     "\"script\": { \"enabled\": false }," +
                     "\"template\": { \"enabled\": false }" +
-                    "}" +
-                    "}" +
-                    "}";
-            } else {
-                defaultMappingSource = "{\n" +
-                    " \"_default_\":{\n" +
-                    " }\n" +
-                    "}";
-            }
+                "}" +
+                "}" +
+                "}";
         } else {
-            try {
-                defaultMappingSource = Streams.copyToString(FileSystemUtils.newBufferedReader(defaultMappingUrl, Charsets.UTF_8));
-            } catch (IOException e) {
-                throw new MapperException("Failed to load default mapping source from [" + defaultMappingLocation + "]", e);
-            }
-        }
-
-        String percolatorMappingLocation = indexSettings.get("index.mapper.default_percolator_mapping_location");
-        URL percolatorMappingUrl = null;
-        if (percolatorMappingLocation != null) {
-            try {
-                percolatorMappingUrl = environment.resolveConfig(percolatorMappingLocation);
-            } catch (FailedToResolveConfigException e) {
-                // not there, default to the built in one
-                try {
-                    percolatorMappingUrl = PathUtils.get(percolatorMappingLocation).toUri().toURL();
-                } catch (MalformedURLException e1) {
-                    throw new FailedToResolveConfigException("Failed to resolve default percolator mapping location [" + percolatorMappingLocation + "]");
-                }
-            }
-        }
-        if (percolatorMappingUrl != null) {
-            try {
-                defaultPercolatorMappingSource = Streams.copyToString(FileSystemUtils.newBufferedReader(percolatorMappingUrl, Charsets.UTF_8));
-            } catch (IOException e) {
-                throw new MapperException("Failed to load default percolator mapping source from [" + percolatorMappingUrl + "]", e);
-            }
-        } else {
-            defaultPercolatorMappingSource = "{\n" +
-                //" \"" + PercolatorService.TYPE_NAME + "\":{\n" +
-                " \"" + "_default_" + "\":{\n" +
-                " \"properties\" : {\n" +
-                " \"query\" : {\n" +
-                " \"type\" : \"object\",\n" +
-                " \"enabled\" : false\n" +
-                " }\n" +
-                " }\n" +
-                " }\n" +
-                "}";
+            defaultMappingSource = "{\"_default_\":{}}";
         }
         if (logger.isTraceEnabled()) {
-            logger.trace("using dynamic[{}], default mapping: default_mapping_location[{}], loaded_from[{}] and source[{}], default percolator mapping: location[{}], loaded_from[{}] and source[{}]", dynamic, defaultMappingLocation, defaultMappingUrl, defaultMappingSource, percolatorMappingLocation, percolatorMappingUrl, defaultPercolatorMappingSource);
+            logger.trace("using dynamic[{}], default mapping source[{}], default percolator mapping source[{}]", dynamic, defaultMappingSource, defaultPercolatorMappingSource);
         } else if (logger.isDebugEnabled()) {
-            logger.debug("using dynamic[{}], default mapping: default_mapping_location[{}], loaded_from[{}], default percolator mapping: location[{}], loaded_from[{}]", dynamic, defaultMappingLocation, defaultMappingUrl, percolatorMappingLocation, percolatorMappingUrl);
+            logger.debug("using dynamic[{}]", dynamic);
         }
     }
-    private URL getMappingUrl(Settings indexSettings, Environment environment, String mappingLocation, String configString, String resourceLocation) {
-        URL mappingUrl;
-        if (mappingLocation == null) {
-            try {
-                mappingUrl = environment.resolveConfig(configString);
-            } catch (FailedToResolveConfigException e) {
-                // not there, default to the built in one
-                mappingUrl = indexSettings.getClassLoader().getResource(resourceLocation);
-                if (mappingUrl == null) {
-                    mappingUrl = MapperService.class.getClassLoader().getResource(resourceLocation);
-                }
-            }
-        } else {
-            try {
-                mappingUrl = environment.resolveConfig(mappingLocation);
-            } catch (FailedToResolveConfigException e) {
-                // not there, default to the built in one
-                try {
-                    mappingUrl = PathUtils.get(mappingLocation).toUri().toURL();
-                } catch (MalformedURLException e1) {
-                    throw new FailedToResolveConfigException("Failed to resolve dynamic mapping location [" + mappingLocation + "]");
-                }
-            }
-        }
-        return mappingUrl;
-    }
-
     public void close() {
         for (DocumentMapper documentMapper : mappers.values()) {
             documentMapper.close();
@@ -339,11 +255,11 @@ public class MapperService extends AbstractIndexComponent {
         DocumentMapper oldMapper = mappers.get(mapper.type());
         if (oldMapper != null) {
-            DocumentMapper.MergeResult result = oldMapper.merge(mapper.mapping(), mergeFlags().simulate(false));
+            MergeResult result = oldMapper.merge(mapper.mapping(), false);
             if (result.hasConflicts()) {
                 // TODO: What should we do???
                 if (logger.isDebugEnabled()) {
-                    logger.debug("merging mapping for type [{}] resulted in conflicts: [{}]", mapper.type(), Arrays.toString(result.conflicts()));
+                    logger.debug("merging mapping for type [{}] resulted in conflicts: [{}]", mapper.type(), Arrays.toString(result.buildConflicts()));
                 }
             }
             fieldDataService.onMappingUpdate();
@@ -388,7 +304,7 @@ public class MapperService extends AbstractIndexComponent {
         }
     }
-    private void addFieldMappers(List<FieldMapper<?>> fieldMappers) {
+    private void addFieldMappers(Collection<FieldMapper<?>> fieldMappers) {
         synchronized (mappersMutex) {
             this.fieldMappers = this.fieldMappers.copyAndAddAll(fieldMappers);
         }
@@ -455,12 +371,12 @@ public class MapperService extends AbstractIndexComponent {
         if (hasNested && filterPercolateType) {
             BooleanQuery bq = new BooleanQuery();
             bq.add(percolatorType, Occur.MUST_NOT);
-            bq.add(NonNestedDocsFilter.INSTANCE, Occur.MUST);
-            return Queries.wrap(bq);
+            bq.add(Queries.newNonNestedFilter(), Occur.MUST);
+            return new QueryWrapperFilter(bq);
         } else if (hasNested) {
-            return NonNestedDocsFilter.INSTANCE;
+            return Queries.newNonNestedFilter();
         } else if (filterPercolateType) {
-            return Queries.wrap(Queries.not(percolatorType));
+            return new QueryWrapperFilter(Queries.not(percolatorType));
         } else {
             return null;
         }
@@ -469,12 +385,12 @@ public class MapperService extends AbstractIndexComponent {
         // since they have different types (starting with __)
         if (types.length == 1) {
             DocumentMapper docMapper = documentMapper(types[0]);
-            Filter filter = docMapper != null ? docMapper.typeFilter() : Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, types[0])));
+            Filter filter = docMapper != null ? docMapper.typeFilter() : new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, types[0])));
             if (filterPercolateType) {
                 BooleanQuery bq = new BooleanQuery();
                 bq.add(percolatorType, Occur.MUST_NOT);
                 bq.add(filter, Occur.MUST);
-                return Queries.wrap(bq);
+                return new QueryWrapperFilter(bq);
             } else {
                 return filter;
             }
@@ -504,9 +420,9 @@ public class MapperService extends AbstractIndexComponent {
                 BooleanQuery bq = new BooleanQuery();
                 bq.add(percolatorType, Occur.MUST_NOT);
                 bq.add(termsFilter, Occur.MUST);
-                return Queries.wrap(bq);
+                return new QueryWrapperFilter(bq);
             } else {
-                return Queries.wrap(termsFilter);
+                return new QueryWrapperFilter(termsFilter);
             }
         } else {
             // Current bool filter requires that at least one should clause matches, even with a must clause.
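The hunks above and below in MapperService.java repeat a single pattern: the removed Queries.wrap(...) helper is replaced by Lucene's stock QueryWrapperFilter, and the NonNestedDocsFilter singleton by Queries.newNonNestedFilter(). A minimal sketch of that pattern, using the same Lucene calls the patch itself calls into; the class name, method name, and the "_type" field literal here are illustrative only, not code from this patch:

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BooleanClause.Occur;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.Filter;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.QueryWrapperFilter;
    import org.apache.lucene.search.TermQuery;

    class QueryWrapperFilterSketch {
        // Build a filter that matches one mapping type while excluding
        // percolator documents: a BooleanQuery with a MUST and a MUST_NOT
        // clause, exposed through the Filter API via QueryWrapperFilter.
        static Filter typeFilterExcluding(Query percolatorType, String type) {
            BooleanQuery bq = new BooleanQuery();
            bq.add(percolatorType, Occur.MUST_NOT);
            bq.add(new TermQuery(new Term("_type", type)), Occur.MUST);
            return new QueryWrapperFilter(bq);
        }
    }

QueryWrapperFilter simply presents an existing Query through the Filter interface, so the swap removes the Elasticsearch-specific wrapper without changing which documents match.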
@@ -523,10 +439,10 @@ public class MapperService extends AbstractIndexComponent { bool.add(percolatorType, BooleanClause.Occur.MUST_NOT); } if (hasNested) { - bool.add(NonNestedDocsFilter.INSTANCE, BooleanClause.Occur.MUST); + bool.add(Queries.newNonNestedFilter(), BooleanClause.Occur.MUST); } - return Queries.wrap(bool); + return new QueryWrapperFilter(bool); } } @@ -721,7 +637,7 @@ public class MapperService extends AbstractIndexComponent { final Mapper.TypeParser.ParserContext parserContext = documentMapperParser().parserContext(); Mapper.TypeParser typeParser = parserContext.typeParser(type); if (typeParser == null) { - throw new ElasticsearchIllegalArgumentException("No mapper found for type [" + type + "]"); + throw new IllegalArgumentException("No mapper found for type [" + type + "]"); } final Mapper.Builder builder = typeParser.parse("__anonymous_" + type, ImmutableMap.of(), parserContext); final BuilderContext builderContext = new BuilderContext(indexSettings, new ContentPath(1)); @@ -936,7 +852,7 @@ public class MapperService extends AbstractIndexComponent { } @Override - public void fieldMappers(List> fieldMappers) { + public void fieldMappers(Collection> fieldMappers) { addFieldMappers(fieldMappers); } } diff --git a/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java b/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java index df59743b0cf..d83cd76440a 100644 --- a/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java +++ b/src/main/java/org/elasticsearch/index/mapper/MapperUtils.java @@ -24,26 +24,14 @@ import org.elasticsearch.index.mapper.object.ObjectMapper; import java.io.IOException; import java.util.Collection; -import java.util.List; public enum MapperUtils { ; - /** - * Parse the given {@code context} with the given {@code mapper} and apply - * the potential mapping update in-place. This method is useful when - * composing mapping updates. 
- */ - public static M parseAndMergeUpdate(M mapper, ParseContext context) throws IOException { - final Mapper update = mapper.parse(context); - if (update != null) { - merge(mapper, update); - } - return mapper; - } - private static MergeContext newStrictMergeContext() { - return new MergeContext(new DocumentMapper.MergeFlags().simulate(false)) { + + private static MergeResult newStrictMergeContext() { + return new MergeResult(false) { @Override public boolean hasConflicts() { @@ -61,7 +49,7 @@ public enum MapperUtils { } @Override - public void addFieldMappers(List> fieldMappers) { + public void addFieldMappers(Collection> fieldMappers) { // no-op } diff --git a/src/main/java/org/elasticsearch/index/mapper/Mapping.java b/src/main/java/org/elasticsearch/index/mapper/Mapping.java index 62e89bfe209..b8bbf05616b 100644 --- a/src/main/java/org/elasticsearch/index/mapper/Mapping.java +++ b/src/main/java/org/elasticsearch/index/mapper/Mapping.java @@ -25,7 +25,6 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.index.mapper.DocumentMapper.MergeResult; import org.elasticsearch.index.mapper.object.RootObjectMapper; import java.io.IOException; @@ -95,11 +94,11 @@ public final class Mapping implements ToXContent { return (T) rootMappersMap.get(clazz); } - /** @see DocumentMapper#merge(DocumentMapper, org.elasticsearch.index.mapper.DocumentMapper.MergeFlags) */ - public MergeResult merge(Mapping mergeWith, MergeContext mergeContext) { + /** @see DocumentMapper#merge(Mapping, boolean) */ + public void merge(Mapping mergeWith, MergeResult mergeResult) { assert rootMappers.length == mergeWith.rootMappers.length; - root.merge(mergeWith.root, mergeContext); + root.merge(mergeWith.root, mergeResult); for (RootMapper rootMapper : rootMappers) { // root mappers included in root object will get merge in the rootObjectMapper if (rootMapper.includeInObject()) { @@ -107,15 +106,14 @@ public final class Mapping implements ToXContent { } RootMapper mergeWithRootMapper = mergeWith.rootMapper(rootMapper.getClass()); if (mergeWithRootMapper != null) { - rootMapper.merge(mergeWithRootMapper, mergeContext); + rootMapper.merge(mergeWithRootMapper, mergeResult); } } - if (mergeContext.mergeFlags().simulate() == false) { + if (mergeResult.simulate() == false) { // let the merge with attributes to override the attributes meta = mergeWith.meta; } - return new MergeResult(mergeContext.buildConflicts()); } @Override diff --git a/src/main/java/org/elasticsearch/index/mapper/MergeContext.java b/src/main/java/org/elasticsearch/index/mapper/MergeResult.java similarity index 76% rename from src/main/java/org/elasticsearch/index/mapper/MergeContext.java rename to src/main/java/org/elasticsearch/index/mapper/MergeResult.java index f8ddb837517..ab685f624ef 100644 --- a/src/main/java/org/elasticsearch/index/mapper/MergeContext.java +++ b/src/main/java/org/elasticsearch/index/mapper/MergeResult.java @@ -21,26 +21,25 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.index.mapper.object.ObjectMapper; +import java.util.ArrayList; import java.util.Collection; import java.util.List; -/** - * - */ -public abstract class MergeContext { +/** A container for tracking results of a mapping merge. 
*/ +public abstract class MergeResult { - private final DocumentMapper.MergeFlags mergeFlags; + private final boolean simulate; - public MergeContext(DocumentMapper.MergeFlags mergeFlags) { - this.mergeFlags = mergeFlags; + public MergeResult(boolean simulate) { + this.simulate = simulate; } - public abstract void addFieldMappers(List> fieldMappers); + public abstract void addFieldMappers(Collection> fieldMappers); public abstract void addObjectMappers(Collection objectMappers); - public DocumentMapper.MergeFlags mergeFlags() { - return mergeFlags; + public boolean simulate() { + return simulate; } public abstract void addConflict(String mergeFailure); diff --git a/src/main/java/org/elasticsearch/index/mapper/ParseContext.java b/src/main/java/org/elasticsearch/index/mapper/ParseContext.java index 6530af5e0c6..b608cf8a551 100644 --- a/src/main/java/org/elasticsearch/index/mapper/ParseContext.java +++ b/src/main/java/org/elasticsearch/index/mapper/ParseContext.java @@ -27,8 +27,6 @@ import org.apache.lucene.document.Field; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.all.AllEntries; @@ -110,7 +108,7 @@ public abstract class ParseContext { if (keyedFields == null) { keyedFields = new ObjectObjectOpenHashMap<>(); } else if (keyedFields.containsKey(key)) { - throw new ElasticsearchIllegalStateException("Only one field can be stored per key"); + throw new IllegalStateException("Only one field can be stored per key"); } keyedFields.put(key, field); add(field); @@ -790,7 +788,7 @@ public abstract class ParseContext { } public Object externalValue() { - throw new ElasticsearchIllegalStateException("External value is not set"); + throw new IllegalStateException("External value is not set"); } /** @@ -804,7 +802,7 @@ public abstract class ParseContext { } if (!clazz.isInstance(externalValue())) { - throw new ElasticsearchIllegalArgumentException("illegal external value class [" + throw new IllegalArgumentException("illegal external value class [" + externalValue().getClass().getName() + "]. 
Should be " + clazz.getName()); } return clazz.cast(externalValue()); diff --git a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java index a9ab088d285..8d004a31a91 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/AbstractFieldMapper.java @@ -24,6 +24,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.base.Objects; import com.google.common.collect.ImmutableList; + import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; @@ -35,13 +36,14 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.index.Terms; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; +import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.lucene.BytesRefs; @@ -53,16 +55,7 @@ import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.fielddata.FieldDataType; -import org.elasticsearch.index.mapper.ContentPath; -import org.elasticsearch.index.mapper.FieldMapper; -import org.elasticsearch.index.mapper.FieldMapperListener; -import org.elasticsearch.index.mapper.FieldMappers; -import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; -import org.elasticsearch.index.mapper.MergeMappingException; -import org.elasticsearch.index.mapper.ObjectMapperListener; -import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.*; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.internal.AllFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; @@ -436,9 +429,6 @@ public abstract class AbstractFieldMapper implements FieldMapper { throw new MapperParsingException("failed to parse [" + names.fullName() + "]", e); } multiFields.parse(this, context); - if (copyTo != null) { - copyTo.parse(context); - } return null; } @@ -492,7 +482,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { @Override public Filter termFilter(Object value, @Nullable QueryParseContext context) { - return Queries.wrap(new TermQuery(names().createIndexNameTerm(indexedValueForSearch(value)))); + return new QueryWrapperFilter(new TermQuery(names().createIndexNameTerm(indexedValueForSearch(value)))); } @Override @@ -511,7 +501,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { for (int i = 0; i < bytesRefs.length; i++) { bytesRefs[i] = indexedValueForSearch(values.get(i)); } - return Queries.wrap(new TermsQuery(names.indexName(), bytesRefs)); + 
return new QueryWrapperFilter(new TermsQuery(names.indexName(), bytesRefs)); } } @@ -541,7 +531,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(new TermRangeQuery(names.indexName(), + return new QueryWrapperFilter(new TermRangeQuery(names.indexName(), lowerTerm == null ? null : indexedValueForSearch(lowerTerm), upperTerm == null ? null : indexedValueForSearch(upperTerm), includeLower, includeUpper)); @@ -563,7 +553,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { @Override public Filter prefixFilter(Object value, @Nullable QueryParseContext context) { - return Queries.wrap(new PrefixQuery(names().createIndexNameTerm(indexedValueForSearch(value)))); + return new QueryWrapperFilter(new PrefixQuery(names().createIndexNameTerm(indexedValueForSearch(value)))); } @Override @@ -577,7 +567,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { @Override public Filter regexpFilter(Object value, int flags, int maxDeterminizedStates, @Nullable QueryParseContext parseContext) { - return Queries.wrap(new RegexpQuery(names().createIndexNameTerm(indexedValueForSearch(value)), flags, maxDeterminizedStates)); + return new QueryWrapperFilter(new RegexpQuery(names().createIndexNameTerm(indexedValueForSearch(value)), flags, maxDeterminizedStates)); } @Override @@ -586,13 +576,13 @@ public abstract class AbstractFieldMapper implements FieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { if (!this.getClass().equals(mergeWith.getClass())) { String mergedType = mergeWith.getClass().getSimpleName(); if (mergeWith instanceof AbstractFieldMapper) { mergedType = ((AbstractFieldMapper) mergeWith).contentType(); } - mergeContext.addConflict("mapper [" + names.fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]"); + mergeResult.addConflict("mapper [" + names.fullName() + "] of different type, current_type [" + contentType() + "], merged_type [" + mergedType + "]"); // different types, return return; } @@ -600,62 +590,62 @@ public abstract class AbstractFieldMapper implements FieldMapper { boolean indexed = fieldType.indexOptions() != IndexOptions.NONE; boolean mergeWithIndexed = fieldMergeWith.fieldType().indexOptions() != IndexOptions.NONE; if (indexed != mergeWithIndexed || this.fieldType().tokenized() != fieldMergeWith.fieldType().tokenized()) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different index values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different index values"); } if (this.fieldType().stored() != fieldMergeWith.fieldType().stored()) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different store values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different store values"); } if (!this.hasDocValues() && fieldMergeWith.hasDocValues()) { // don't add conflict if this mapper has doc values while the mapper to merge doesn't since doc values are implicitely set // when the doc_values field data format is configured - mergeContext.addConflict("mapper [" + names.fullName() + "] has different " + TypeParsers.DOC_VALUES + " values"); + mergeResult.addConflict("mapper [" + 
names.fullName() + "] has different " + TypeParsers.DOC_VALUES + " values"); } if (this.fieldType().omitNorms() && !fieldMergeWith.fieldType.omitNorms()) { - mergeContext.addConflict("mapper [" + names.fullName() + "] cannot enable norms (`norms.enabled`)"); + mergeResult.addConflict("mapper [" + names.fullName() + "] cannot enable norms (`norms.enabled`)"); } if (this.fieldType().tokenized() != fieldMergeWith.fieldType().tokenized()) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different tokenize values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different tokenize values"); } if (this.fieldType().storeTermVectors() != fieldMergeWith.fieldType().storeTermVectors()) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different store_term_vector values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different store_term_vector values"); } if (this.fieldType().storeTermVectorOffsets() != fieldMergeWith.fieldType().storeTermVectorOffsets()) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_offsets values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_offsets values"); } if (this.fieldType().storeTermVectorPositions() != fieldMergeWith.fieldType().storeTermVectorPositions()) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_positions values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_positions values"); } if (this.fieldType().storeTermVectorPayloads() != fieldMergeWith.fieldType().storeTermVectorPayloads()) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_payloads values"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different store_term_vector_payloads values"); } // null and "default"-named index analyzers both mean the default is used if (this.indexAnalyzer == null || "default".equals(this.indexAnalyzer.name())) { if (fieldMergeWith.indexAnalyzer != null && !"default".equals(fieldMergeWith.indexAnalyzer.name())) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different analyzer"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different analyzer"); } } else if (fieldMergeWith.indexAnalyzer == null || "default".equals(fieldMergeWith.indexAnalyzer.name())) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different analyzer"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different analyzer"); } else if (!this.indexAnalyzer.name().equals(fieldMergeWith.indexAnalyzer.name())) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different analyzer"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different analyzer"); } if (!this.names().equals(fieldMergeWith.names())) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different index_name"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different index_name"); } if (this.similarity == null) { if (fieldMergeWith.similarity() != null) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different similarity"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different similarity"); } } else if (fieldMergeWith.similarity() == null) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different similarity"); + 
mergeResult.addConflict("mapper [" + names.fullName() + "] has different similarity"); } else if (!this.similarity().equals(fieldMergeWith.similarity())) { - mergeContext.addConflict("mapper [" + names.fullName() + "] has different similarity"); + mergeResult.addConflict("mapper [" + names.fullName() + "] has different similarity"); } - multiFields.merge(mergeWith, mergeContext); + multiFields.merge(mergeWith, mergeResult); - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { // apply changeable values this.fieldType = new FieldType(this.fieldType); this.fieldType.setOmitNorms(fieldMergeWith.fieldType.omitNorms()); @@ -777,7 +767,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { case DOCS: return TypeParsers.INDEX_OPTIONS_DOCS; default: - throw new ElasticsearchIllegalArgumentException("Unknown IndexOptions [" + indexOption + "]"); + throw new IllegalArgumentException("Unknown IndexOptions [" + indexOption + "]"); } } @@ -844,7 +834,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { public static class MultiFields { public static MultiFields empty() { - return new MultiFields(Defaults.PATH_TYPE, ImmutableOpenMap.of()); + return new MultiFields(Defaults.PATH_TYPE, ImmutableOpenMap.of()); } public static class Builder { @@ -867,7 +857,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { if (pathType == Defaults.PATH_TYPE && mapperBuilders.isEmpty()) { return empty(); } else if (mapperBuilders.isEmpty()) { - return new MultiFields(pathType, ImmutableOpenMap.of()); + return new MultiFields(pathType, ImmutableOpenMap.of()); } else { ContentPath.Type origPathType = context.path().pathType(); context.path().pathType(pathType); @@ -876,26 +866,27 @@ public abstract class AbstractFieldMapper implements FieldMapper { for (ObjectObjectCursor cursor : this.mapperBuilders) { String key = cursor.key; Mapper.Builder value = cursor.value; - mapperBuilders.put(key, value.build(context)); + Mapper mapper = value.build(context); + assert mapper instanceof FieldMapper; + mapperBuilders.put(key, mapper); } context.path().remove(); context.path().pathType(origPathType); - ImmutableOpenMap.Builder mappers = mapperBuilders.cast(); + ImmutableOpenMap.Builder mappers = mapperBuilders.cast(); return new MultiFields(pathType, mappers.build()); } } - } private final ContentPath.Type pathType; - private volatile ImmutableOpenMap mappers; + private volatile ImmutableOpenMap mappers; - public MultiFields(ContentPath.Type pathType, ImmutableOpenMap mappers) { + public MultiFields(ContentPath.Type pathType, ImmutableOpenMap mappers) { this.pathType = pathType; this.mappers = mappers; // we disable the all in multi-field mappers - for (ObjectCursor cursor : mappers.values()) { - Mapper mapper = cursor.value; + for (ObjectCursor cursor : mappers.values()) { + FieldMapper mapper = cursor.value; if (mapper instanceof AllFieldMapper.IncludeInAll) { ((AllFieldMapper.IncludeInAll) mapper).unsetIncludeInAll(); } @@ -903,6 +894,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { } public void parse(AbstractFieldMapper mainField, ParseContext context) throws IOException { + // TODO: multi fields are really just copy fields, we just need to expose "sub fields" or something that can be part of the mappings if (mappers.isEmpty()) { return; } @@ -913,7 +905,7 @@ public abstract class AbstractFieldMapper implements FieldMapper { context.path().pathType(pathType); context.path().add(mainField.name()); - for (ObjectCursor cursor : 
mappers.values()) { + for (ObjectCursor cursor : mappers.values()) { cursor.value.parse(context); } context.path().remove(); @@ -921,18 +913,18 @@ public abstract class AbstractFieldMapper implements FieldMapper { } // No need for locking, because locking is taken care of in ObjectMapper#merge and DocumentMapper#merge - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { AbstractFieldMapper mergeWithMultiField = (AbstractFieldMapper) mergeWith; List> newFieldMappers = null; - ImmutableOpenMap.Builder newMappersBuilder = null; + ImmutableOpenMap.Builder newMappersBuilder = null; - for (ObjectCursor cursor : mergeWithMultiField.multiFields.mappers.values()) { - Mapper mergeWithMapper = cursor.value; + for (ObjectCursor cursor : mergeWithMultiField.multiFields.mappers.values()) { + FieldMapper mergeWithMapper = cursor.value; Mapper mergeIntoMapper = mappers.get(mergeWithMapper.name()); if (mergeIntoMapper == null) { // no mapping, simply add it if not simulating - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { // we disable the all in multi-field mappers if (mergeWithMapper instanceof AllFieldMapper.IncludeInAll) { ((AllFieldMapper.IncludeInAll) mergeWithMapper).unsetIncludeInAll(); @@ -945,17 +937,17 @@ public abstract class AbstractFieldMapper implements FieldMapper { if (newFieldMappers == null) { newFieldMappers = new ArrayList<>(2); } - newFieldMappers.add((FieldMapper) mergeWithMapper); + newFieldMappers.add(mergeWithMapper); } } } else { - mergeIntoMapper.merge(mergeWithMapper, mergeContext); + mergeIntoMapper.merge(mergeWithMapper, mergeResult); } } // first add all field mappers if (newFieldMappers != null) { - mergeContext.addFieldMappers(newFieldMappers); + mergeResult.addFieldMappers(newFieldMappers); } // now publish mappers if (newMappersBuilder != null) { @@ -964,13 +956,13 @@ public abstract class AbstractFieldMapper implements FieldMapper { } public void traverse(FieldMapperListener fieldMapperListener) { - for (ObjectCursor cursor : mappers.values()) { + for (ObjectCursor cursor : mappers.values()) { cursor.value.traverse(fieldMapperListener); } } public void close() { - for (ObjectCursor cursor : mappers.values()) { + for (ObjectCursor cursor : mappers.values()) { cursor.value.close(); } } @@ -1009,34 +1001,6 @@ public abstract class AbstractFieldMapper implements FieldMapper { this.copyToFields = copyToFields; } - /** - * Creates instances of the fields that the current field should be copied to - */ - public void parse(ParseContext context) throws IOException { - if (!context.isWithinCopyTo() && copyToFields.isEmpty() == false) { - context = context.createCopyToContext(); - for (String field : copyToFields) { - // In case of a hierarchy of nested documents, we need to figure out - // which document the field should go to - Document targetDoc = null; - for (Document doc = context.doc(); doc != null; doc = doc.getParent()) { - if (field.startsWith(doc.getPrefix())) { - targetDoc = doc; - break; - } - } - assert targetDoc != null; - final ParseContext copyToContext; - if (targetDoc == context.doc()) { - copyToContext = context; - } else { - copyToContext = context.switchDoc(targetDoc); - } - parse(field, copyToContext); - } - } - } - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { if (!copyToFields.isEmpty()) { builder.startArray("copy_to"); @@ -1064,53 +1028,6 @@ 
public abstract class AbstractFieldMapper implements FieldMapper { public ImmutableList copyToFields() { return copyToFields; } - - /** - * Creates an copy of the current field with given field name and boost - */ - public void parse(String field, ParseContext context) throws IOException { - FieldMappers mappers = context.docMapper().mappers().indexName(field); - if (mappers != null && !mappers.isEmpty()) { - mappers.mapper().parse(context); - } else { - // The path of the dest field might be completely different from the current one so we need to reset it - context = context.overridePath(new ContentPath(0)); - - ObjectMapper mapper = context.root(); - String objectPath = ""; - String fieldPath = field; - int posDot = field.lastIndexOf('.'); - if (posDot > 0) { - objectPath = field.substring(0, posDot); - context.path().add(objectPath); - mapper = context.docMapper().objectMappers().get(objectPath); - fieldPath = field.substring(posDot + 1); - } - if (mapper == null) { - //TODO: Create an object dynamically? - throw new MapperParsingException("attempt to copy value to non-existing object [" + field + "]"); - } - ObjectMapper update = mapper.parseDynamicValue(context, fieldPath, context.parser().currentToken()); - assert update != null; // we are parsing a dynamic value so we necessarily created a new mapping - - // propagate the update to the root - while (objectPath.length() > 0) { - String parentPath = ""; - ObjectMapper parent = context.root(); - posDot = objectPath.lastIndexOf('.'); - if (posDot > 0) { - parentPath = objectPath.substring(0, posDot); - parent = context.docMapper().objectMappers().get(parentPath); - } - if (parent == null) { - throw new ElasticsearchIllegalStateException("[" + objectPath + "] has no parent for path [" + parentPath + "]"); - } - update = parent.mappingUpdate(update); - objectPath = parentPath; - } - context.addDynamicMappingsUpdate((RootObjectMapper) update); - } - } } /** @@ -1121,4 +1038,10 @@ public abstract class AbstractFieldMapper implements FieldMapper { return false; } + @Override + public FieldStats stats(Terms terms, int maxDoc) throws IOException { + return new FieldStats.Text( + maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), terms.getMin(), terms.getMax() + ); + } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java index 5154b2074e1..83dfb2c0782 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/BinaryFieldMapper.java @@ -45,7 +45,7 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; @@ -245,14 +245,14 @@ public class BinaryFieldMapper extends AbstractFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } BinaryFieldMapper 
sourceMergeWith = (BinaryFieldMapper) mergeWith; - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { if (sourceMergeWith.compress != null) { this.compress = sourceMergeWith.compress; } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java index 18344809168..d01ff743279 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/BooleanFieldMapper.java @@ -24,21 +24,20 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.document.SortedNumericDocValuesField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.similarity.SimilarityProvider; @@ -94,7 +93,7 @@ public class BooleanFieldMapper extends AbstractFieldMapper { @Override public Builder tokenized(boolean tokenized) { if (tokenized) { - throw new ElasticsearchIllegalArgumentException("bool field can't be tokenized"); + throw new IllegalArgumentException("bool field can't be tokenized"); } return super.tokenized(tokenized); } @@ -206,7 +205,7 @@ public class BooleanFieldMapper extends AbstractFieldMapper { if (nullValue == null) { return null; } - return Queries.wrap(new TermQuery(names().createIndexNameTerm(nullValue ? Values.TRUE : Values.FALSE))); + return new QueryWrapperFilter(new TermQuery(names().createIndexNameTerm(nullValue ? 
Values.TRUE : Values.FALSE))); } @Override @@ -237,13 +236,13 @@ public class BooleanFieldMapper extends AbstractFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { this.nullValue = ((BooleanFieldMapper) mergeWith).nullValue; } } diff --git a/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java index 66d87a77aea..37a6f0a33b5 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/ByteFieldMapper.java @@ -23,17 +23,18 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.Terms; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.action.fieldstats.FieldStats; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -44,7 +45,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.query.QueryParseContext; @@ -211,7 +212,7 @@ public class ByteFieldMapper extends NumberFieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, lowerTerm == null ? null : parseValueAsInt(lowerTerm), upperTerm == null ? 
null : parseValueAsInt(upperTerm), includeLower, includeUpper)); @@ -230,7 +231,7 @@ public class ByteFieldMapper extends NumberFieldMapper { if (nullValue == null) { return null; } - return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep, nullValue.intValue(), nullValue.intValue(), true, true)); @@ -294,7 +295,7 @@ public class ByteFieldMapper extends NumberFieldMapper { } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) { boost = parser.floatValue(); } else { - throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]"); + throw new IllegalArgumentException("unknown property [" + currentFieldName + "]"); } } } @@ -326,12 +327,12 @@ public class ByteFieldMapper extends NumberFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { this.nullValue = ((ByteFieldMapper) mergeWith).nullValue; this.nullValueAsString = ((ByteFieldMapper) mergeWith).nullValueAsString; } @@ -354,6 +355,15 @@ public class ByteFieldMapper extends NumberFieldMapper { } } + @Override + public FieldStats stats(Terms terms, int maxDoc) throws IOException { + long minValue = NumericUtils.getMinInt(terms); + long maxValue = NumericUtils.getMaxInt(terms); + return new FieldStats.Long( + maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue + ); + } + public static class CustomByteNumericField extends CustomNumericField { private final byte number; diff --git a/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java index 7e037bd533c..83b8781af9e 100644 --- a/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/core/CompletionFieldMapper.java @@ -28,8 +28,6 @@ import org.apache.lucene.document.Field; import org.apache.lucene.document.FieldType; import org.apache.lucene.search.suggest.analyzing.XAnalyzingSuggester; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; @@ -44,7 +42,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperException; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.similarity.SimilarityProvider; @@ -137,7 +135,7 @@ public class CompletionFieldMapper extends AbstractFieldMapper { public Builder maxInputLength(int maxInputLength) { if (maxInputLength <= 0) { - throw new 
ElasticsearchIllegalArgumentException(Fields.MAX_INPUT_LENGTH.getPreferredName() + " must be > 0 but was [" + maxInputLength + "]"); + throw new IllegalArgumentException(Fields.MAX_INPUT_LENGTH.getPreferredName() + " must be > 0 but was [" + maxInputLength + "]"); } this.maxInputLength = maxInputLength; return this; @@ -215,7 +213,7 @@ public class CompletionFieldMapper extends AbstractFieldMapper { private NamedAnalyzer getNamedAnalyzer(ParserContext parserContext, String name) { NamedAnalyzer analyzer = parserContext.analysisService().analyzer(name); if (analyzer == null) { - throw new ElasticsearchIllegalArgumentException("Can't find default or mapped analyzer with name [" + name + "]"); + throw new IllegalArgumentException("Can't find default or mapped analyzer with name [" + name + "]"); } return analyzer; } @@ -257,7 +255,7 @@ public class CompletionFieldMapper extends AbstractFieldMapper { public synchronized PostingsFormat postingsFormat(PostingsFormat in) { if (in instanceof Completion090PostingsFormat) { - throw new ElasticsearchIllegalStateException("Double wrapping of " + Completion090PostingsFormat.class); + throw new IllegalStateException("Double wrapping of " + Completion090PostingsFormat.class); } if (postingsFormat == null) { postingsFormat = new Completion090PostingsFormat(in, analyzingSuggestLookupProvider); @@ -286,7 +284,7 @@ public class CompletionFieldMapper extends AbstractFieldMapper { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); if (!ALLOWED_CONTENT_FIELD_NAMES.contains(currentFieldName)) { - throw new ElasticsearchIllegalArgumentException("Unknown field name[" + currentFieldName + "], must be one of " + ALLOWED_CONTENT_FIELD_NAMES); + throw new IllegalArgumentException("Unknown field name[" + currentFieldName + "], must be one of " + ALLOWED_CONTENT_FIELD_NAMES); } } else if (Fields.CONTEXT.equals(currentFieldName)) { SortedMap configs = Maps.newTreeMap(); @@ -335,7 +333,7 @@ public class CompletionFieldMapper extends AbstractFieldMapper { try { weightValue = Long.parseLong(parser.text()); } catch (NumberFormatException e) { - throw new ElasticsearchIllegalArgumentException("Weight must be a string representing a numeric value, but was [" + parser.text() + "]"); + throw new IllegalArgumentException("Weight must be a string representing a numeric value, but was [" + parser.text() + "]"); } weight = weightValue.longValue(); // always parse a long to make sure we don't get overflow checkWeight(weight); @@ -344,7 +342,7 @@ public class CompletionFieldMapper extends AbstractFieldMapper { if (Fields.CONTENT_FIELD_NAME_WEIGHT.equals(currentFieldName)) { NumberType numberType = parser.numberType(); if (NumberType.LONG != numberType && NumberType.INT != numberType) { - throw new ElasticsearchIllegalArgumentException("Weight must be an integer, but was [" + parser.numberValue() + "]"); + throw new IllegalArgumentException("Weight must be an integer, but was [" + parser.numberValue() + "]"); } weight = parser.longValue(); // always parse a long to make sure we don't get overflow checkWeight(weight); @@ -387,7 +385,7 @@ public class CompletionFieldMapper extends AbstractFieldMapper { private void checkWeight(long weight) { if (weight < 0 || weight > Integer.MAX_VALUE) { - throw new ElasticsearchIllegalArgumentException("Weight must be in the interval [0..2147483647], but was [" + weight + "]"); + throw new IllegalArgumentException("Weight must be in the interval [0..2147483647], but was [" + weight + "]"); } } @@ -415,7 +413,7 @@ 
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java
index 8e5c88a9636..5ab0049178b 100644
--- a/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/core/DateFieldMapper.java
@@ -23,14 +23,16 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.NumericUtils;
 import org.apache.lucene.util.ToStringUtils;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Numbers;
@@ -38,8 +40,6 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.joda.DateMathParser;
 import org.elasticsearch.common.joda.FormatDateTimeFormatter;
 import org.elasticsearch.common.joda.Joda;
-import org.elasticsearch.common.lucene.search.NoCacheQuery;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.lucene.search.ResolvableFilter;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
@@ -51,7 +51,7 @@ import org.elasticsearch.index.fielddata.FieldDataType;
 import org.elasticsearch.index.fielddata.IndexNumericFieldData;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeContext;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.core.LongFieldMapper.CustomLongNumericField;
@@ -391,7 +391,7 @@ public class DateFieldMapper extends NumberFieldMapper {
             if (fieldData != null) {
                 filter = NumericRangeFieldDataFilter.newLongRange(fieldData, lowerVal,upperVal, includeLower, includeUpper);
             } else {
-                filter = Queries.wrap(NumericRangeQuery.newLongRange(
+                filter = new QueryWrapperFilter(NumericRangeQuery.newLongRange(
                     names.indexName(), precisionStep, lowerVal, upperVal, includeLower, includeUpper
                 ));
             }
@@ -405,7 +405,7 @@ public class DateFieldMapper extends NumberFieldMapper {
             return null;
         }
         long value = parseStringValue(nullValue);
-        return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
                 value,
                 value,
                 true, true));
@@ -456,7 +456,7 @@ public class DateFieldMapper extends NumberFieldMapper {
                 } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
                     boost = parser.floatValue();
                 } else {
-                    throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]");
+                    throw new IllegalArgumentException("unknown property [" + currentFieldName + "]");
                 }
             }
         }
@@ -492,12 +492,12 @@ public class DateFieldMapper extends NumberFieldMapper {
     }

     @Override
-    public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
-        super.merge(mergeWith, mergeContext);
+    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+        super.merge(mergeWith, mergeResult);
         if (!this.getClass().equals(mergeWith.getClass())) {
             return;
         }
-        if (!mergeContext.mergeFlags().simulate()) {
+        if (!mergeResult.simulate()) {
             this.nullValue = ((DateFieldMapper) mergeWith).nullValue;
             this.dateTimeFormatter = ((DateFieldMapper) mergeWith).dateTimeFormatter;
         }
@@ -535,6 +535,15 @@ public class DateFieldMapper extends NumberFieldMapper {
         }
     }

+    @Override
+    public FieldStats stats(Terms terms, int maxDoc) throws IOException {
+        long minValue = NumericUtils.getMinLong(terms);
+        long maxValue = NumericUtils.getMaxLong(terms);
+        return new FieldStats.Date(
+            maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue, dateTimeFormatter
+        );
+    }
+
     private long parseStringValue(String value) {
         try {
             return dateTimeFormatter.parser().parseMillis(value);
@@ -578,7 +587,7 @@ public class DateFieldMapper extends NumberFieldMapper {
         }
     }

-    public final class LateParsingQuery extends NoCacheQuery {
+    public final class LateParsingQuery extends Query {

         final Object lowerTerm;
         final Object upperTerm;
@@ -603,7 +612,7 @@ public class DateFieldMapper extends NumberFieldMapper {
         }

         @Override
-        public String innerToString(String s) {
+        public String toString(String s) {
             final StringBuilder sb = new StringBuilder();
             return sb.append(names.indexName()).append(':')
                 .append(includeLower ? '[' : '{')
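With `NoCacheQuery` gone, `LateParsingQuery` is a plain Lucene `Query` and renders itself through the standard `toString(String field)` hook instead of a custom `innerToString`. The hunk above shows it building the familiar Lucene range syntax; the following is only an illustrative restatement of that rendering, with made-up field and bounds:

```java
final class RangeToStringSketch {
    // '[' / ']' mark inclusive bounds, '{' / '}' exclusive ones, matching the
    // StringBuilder chain in the diff. Produces e.g. "timestamp:[2015-01-01 TO 2015-02-01}".
    static String renderRange(String field, Object lower, Object upper,
                              boolean includeLower, boolean includeUpper) {
        return field + ':'
                + (includeLower ? '[' : '{')
                + (lower == null ? "*" : lower.toString())
                + " TO "
                + (upper == null ? "*" : upper.toString())
                + (includeUpper ? ']' : '}');
    }
}
```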
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java
index 6f6058439bf..e7ee19e3dba 100644
--- a/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/core/DoubleFieldMapper.java
@@ -27,17 +27,18 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.NumericUtils;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Numbers;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.util.ByteUtils;
@@ -49,7 +50,7 @@ import org.elasticsearch.index.fielddata.FieldDataType;
 import org.elasticsearch.index.fielddata.IndexNumericFieldData;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeContext;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.query.QueryParseContext;
@@ -201,14 +202,14 @@ public class DoubleFieldMapper extends NumberFieldMapper {

     @Override
     public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
-        return Queries.wrap(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep,
                 lowerTerm == null ? null : parseDoubleValue(lowerTerm),
                 upperTerm == null ? null : parseDoubleValue(upperTerm),
                 includeLower, includeUpper));
     }

     public Filter rangeFilter(Double lowerTerm, Double upperTerm, boolean includeLower, boolean includeUpper) {
-        return Queries.wrap(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, lowerTerm, upperTerm, includeLower, includeUpper));
+        return new QueryWrapperFilter(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep, lowerTerm, upperTerm, includeLower, includeUpper));
     }

     @Override
@@ -224,7 +225,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
         if (nullValue == null) {
             return null;
         }
-        return Queries.wrap(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newDoubleRange(names.indexName(), precisionStep,
                 nullValue,
                 nullValue,
                 true, true));
@@ -288,7 +289,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
                 } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
                     boost = parser.floatValue();
                 } else {
-                    throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]");
+                    throw new IllegalArgumentException("unknown property [" + currentFieldName + "]");
                 }
             }
         }
@@ -331,12 +332,12 @@ public class DoubleFieldMapper extends NumberFieldMapper {
     }

     @Override
-    public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
-        super.merge(mergeWith, mergeContext);
+    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+        super.merge(mergeWith, mergeResult);
         if (!this.getClass().equals(mergeWith.getClass())) {
             return;
         }
-        if (!mergeContext.mergeFlags().simulate()) {
+        if (!mergeResult.simulate()) {
             this.nullValue = ((DoubleFieldMapper) mergeWith).nullValue;
             this.nullValueAsString = ((DoubleFieldMapper) mergeWith).nullValueAsString;
         }
@@ -360,6 +361,15 @@ public class DoubleFieldMapper extends NumberFieldMapper {
     }

+    @Override
+    public FieldStats stats(Terms terms, int maxDoc) throws IOException {
+        double minValue = NumericUtils.sortableLongToDouble(NumericUtils.getMinLong(terms));
+        double maxValue = NumericUtils.sortableLongToDouble(NumericUtils.getMaxLong(terms));
+        return new FieldStats.Double(
+            maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue
+        );
+    }
+
     public static class CustomDoubleNumericField extends CustomNumericField {

         private final double number;
@@ -390,7 +400,7 @@ public class DoubleFieldMapper extends NumberFieldMapper {
         public static final FieldType TYPE = new FieldType();
         static {
-            TYPE.setDocValuesType(DocValuesType.BINARY);
+            TYPE.setDocValuesType(DocValuesType.BINARY);
             TYPE.freeze();
         }
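Every former `Queries.wrap(...)` call in these mappers becomes a direct `new QueryWrapperFilter(...)`, i.e. the stock Lucene 5.x adapter from `Query` to `Filter`. A minimal sketch of the resulting pattern, using only standard Lucene classes; the field name, precision step and bounds are example values:

```java
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.QueryWrapperFilter;

final class RangeFilterSketch {
    // Builds a filter for 10.0 <= price <= 99.5; "price" and the precision
    // step of 4 are arbitrary illustration values.
    static Filter priceRange() {
        return new QueryWrapperFilter(NumericRangeQuery.newDoubleRange(
                "price", 4, 10.0, 99.5, true, true));
    }
}
```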
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java
index ab1391e9698..968261805d9 100644
--- a/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/core/FloatFieldMapper.java
@@ -27,18 +27,19 @@ import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.NumericUtils;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Numbers;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.util.ByteUtils;
@@ -50,7 +51,7 @@ import org.elasticsearch.index.fielddata.FieldDataType;
 import org.elasticsearch.index.fielddata.IndexNumericFieldData;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeContext;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.query.QueryParseContext;
@@ -211,7 +212,7 @@ public class FloatFieldMapper extends NumberFieldMapper {

     @Override
     public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
-        return Queries.wrap(NumericRangeQuery.newFloatRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newFloatRange(names.indexName(), precisionStep,
                 lowerTerm == null ? null : parseValue(lowerTerm),
                 upperTerm == null ? null : parseValue(upperTerm),
                 includeLower, includeUpper));
@@ -230,7 +231,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
         if (nullValue == null) {
             return null;
         }
-        return Queries.wrap(NumericRangeQuery.newFloatRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newFloatRange(names.indexName(), precisionStep,
                 nullValue,
                 nullValue,
                 true, true));
@@ -294,7 +295,7 @@ public class FloatFieldMapper extends NumberFieldMapper {
                 } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
                     boost = parser.floatValue();
                 } else {
-                    throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]");
+                    throw new IllegalArgumentException("unknown property [" + currentFieldName + "]");
                 }
             }
         }
@@ -337,12 +338,12 @@ public class FloatFieldMapper extends NumberFieldMapper {
     }

     @Override
-    public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
-        super.merge(mergeWith, mergeContext);
+    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+        super.merge(mergeWith, mergeResult);
         if (!this.getClass().equals(mergeWith.getClass())) {
             return;
         }
-        if (!mergeContext.mergeFlags().simulate()) {
+        if (!mergeResult.simulate()) {
             this.nullValue = ((FloatFieldMapper) mergeWith).nullValue;
             this.nullValueAsString = ((FloatFieldMapper) mergeWith).nullValueAsString;
         }
@@ -367,6 +368,15 @@ public class FloatFieldMapper extends NumberFieldMapper {
     }

+    @Override
+    public FieldStats stats(Terms terms, int maxDoc) throws IOException {
+        float minValue = NumericUtils.sortableIntToFloat(NumericUtils.getMinInt(terms));
+        float maxValue = NumericUtils.sortableIntToFloat(NumericUtils.getMaxInt(terms));
+        return new FieldStats.Float(
+            maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue
+        );
+    }
+
     public static class CustomFloatNumericField extends CustomNumericField {

         private final float number;
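Unlike the integer mappers, the float and double `stats` overrides must decode the term bounds through Lucene's order-preserving sortable encodings (`sortableIntToFloat`, `sortableLongToDouble`), because that is how the values were written to the index. A small round-trip sketch with the standard `NumericUtils` helpers; the literal values are arbitrary:

```java
import org.apache.lucene.util.NumericUtils;

final class SortableEncodingSketch {
    public static void main(String[] args) {
        float f = 3.14f;
        int sortable = NumericUtils.floatToSortableInt(f);      // order-preserving int encoding
        System.out.println(f == NumericUtils.sortableIntToFloat(sortable));   // true

        double d = 2.71828;
        long sortableLong = NumericUtils.doubleToSortableLong(d); // same idea for doubles
        System.out.println(d == NumericUtils.sortableLongToDouble(sortableLong)); // true
    }
}
```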
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java
index eec2d84d0b9..8fc32539120 100644
--- a/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/core/IntegerFieldMapper.java
@@ -24,18 +24,19 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.NumericUtils;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Numbers;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -45,7 +46,7 @@ import org.elasticsearch.index.fielddata.FieldDataType;
 import org.elasticsearch.index.fielddata.IndexNumericFieldData;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeContext;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.query.QueryParseContext;
@@ -205,7 +206,7 @@ public class IntegerFieldMapper extends NumberFieldMapper {

     @Override
     public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
-        return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
                 lowerTerm == null ? null : parseValue(lowerTerm),
                 upperTerm == null ? null : parseValue(upperTerm),
                 includeLower, includeUpper));
@@ -224,7 +225,7 @@ public class IntegerFieldMapper extends NumberFieldMapper {
         if (nullValue == null) {
             return null;
         }
-        return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
                 nullValue,
                 nullValue,
                 true, true));
@@ -288,7 +289,7 @@ public class IntegerFieldMapper extends NumberFieldMapper {
                 } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
                     boost = parser.floatValue();
                 } else {
-                    throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]");
+                    throw new IllegalArgumentException("unknown property [" + currentFieldName + "]");
                 }
             }
         }
@@ -328,12 +329,12 @@ public class IntegerFieldMapper extends NumberFieldMapper {
     }

     @Override
-    public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
-        super.merge(mergeWith, mergeContext);
+    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+        super.merge(mergeWith, mergeResult);
         if (!this.getClass().equals(mergeWith.getClass())) {
             return;
         }
-        if (!mergeContext.mergeFlags().simulate()) {
+        if (!mergeResult.simulate()) {
             this.nullValue = ((IntegerFieldMapper) mergeWith).nullValue;
             this.nullValueAsString = ((IntegerFieldMapper) mergeWith).nullValueAsString;
         }
@@ -357,6 +358,15 @@ public class IntegerFieldMapper extends NumberFieldMapper {
     }

+    @Override
+    public FieldStats stats(Terms terms, int maxDoc) throws IOException {
+        long minValue = NumericUtils.getMinInt(terms);
+        long maxValue = NumericUtils.getMaxInt(terms);
+        return new FieldStats.Long(
+            maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue
+        );
+    }
+
     public static class CustomIntegerNumericField extends CustomNumericField {

         private final int number;
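A small idiom worth noting in the `nullValueFilter` hunks above: matching documents indexed with the configured `null_value` is done with a degenerate range whose lower and upper bounds are both that value, inclusive on both ends. An illustrative standalone version (field name, precision step and `null_value` are made up):

```java
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.NumericRangeQuery;
import org.apache.lucene.search.QueryWrapperFilter;

final class NullValueFilterSketch {
    // Matches documents whose "status" field was indexed with the configured
    // null_value (-1 here): a [v, v] range is effectively a point query that
    // still benefits from the numeric trie terms.
    static Filter nullValueFilter() {
        int nullValue = -1;
        return new QueryWrapperFilter(NumericRangeQuery.newIntRange(
                "status", 8, nullValue, nullValue, true, true));
    }
}
```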
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java
index c10fdf79af6..bca94be2d34 100644
--- a/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/core/LongFieldMapper.java
@@ -24,18 +24,19 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.NumericUtils;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Numbers;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -45,7 +46,7 @@ import org.elasticsearch.index.fielddata.FieldDataType;
 import org.elasticsearch.index.fielddata.IndexNumericFieldData;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeContext;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.query.QueryParseContext;
@@ -195,7 +196,7 @@ public class LongFieldMapper extends NumberFieldMapper {

     @Override
     public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
-        return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
                 lowerTerm == null ? null : parseLongValue(lowerTerm),
                 upperTerm == null ? null : parseLongValue(upperTerm),
                 includeLower, includeUpper));
@@ -214,7 +215,7 @@ public class LongFieldMapper extends NumberFieldMapper {
         if (nullValue == null) {
             return null;
         }
-        return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep,
                 nullValue,
                 nullValue,
                 true, true));
@@ -278,7 +279,7 @@ public class LongFieldMapper extends NumberFieldMapper {
                 } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
                     boost = parser.floatValue();
                 } else {
-                    throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]");
+                    throw new IllegalArgumentException("unknown property [" + currentFieldName + "]");
                 }
             }
         }
@@ -310,12 +311,12 @@ public class LongFieldMapper extends NumberFieldMapper {
     }

     @Override
-    public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
-        super.merge(mergeWith, mergeContext);
+    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+        super.merge(mergeWith, mergeResult);
         if (!this.getClass().equals(mergeWith.getClass())) {
             return;
         }
-        if (!mergeContext.mergeFlags().simulate()) {
+        if (!mergeResult.simulate()) {
             this.nullValue = ((LongFieldMapper) mergeWith).nullValue;
             this.nullValueAsString = ((LongFieldMapper) mergeWith).nullValueAsString;
         }
@@ -338,6 +339,15 @@ public class LongFieldMapper extends NumberFieldMapper {
         }
     }

+    @Override
+    public FieldStats stats(Terms terms, int maxDoc) throws IOException {
+        long minValue = NumericUtils.getMinLong(terms);
+        long maxValue = NumericUtils.getMaxLong(terms);
+        return new FieldStats.Long(
+            maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue
+        );
+    }
+
     public static class CustomLongNumericField extends CustomNumericField {

         private final long number;

diff --git a/src/main/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapper.java
index 68158c6a316..7c9c920a3c6 100644
--- a/src/main/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapper.java
@@ -22,6 +22,7 @@ package org.elasticsearch.index.mapper.core;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.Version;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.hash.MurmurHash3;
@@ -35,6 +36,7 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Map;

+import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
 import static org.elasticsearch.index.mapper.MapperBuilders.murmur3Field;
 import static org.elasticsearch.index.mapper.core.TypeParsers.parseNumberField;
@@ -69,6 +71,17 @@ public class Murmur3FieldMapper extends LongFieldMapper {
         @SuppressWarnings("unchecked")
         public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
             Builder builder = murmur3Field(name);
+
+            // tweaking these settings is no longer allowed, the entire purpose of murmur3 fields is to store a hash
+            if (parserContext.indexVersionCreated().onOrAfter(Version.V_2_0_0)) {
+                if (node.get("doc_values") != null) {
+                    throw new MapperParsingException("Setting [doc_values] cannot be modified for field [" + name + "]");
+                }
+                if (node.get("index") != null) {
+                    throw new MapperParsingException("Setting [index] cannot be modified for field [" + name + "]");
+                }
+            }
+
             parseNumberField(builder, name, node, parserContext);
             // Because this mapper extends LongFieldMapper the null_value field will be added to the JSON when transferring cluster state
             // between nodes so we have to remove the entry here so that the validation doesn't fail
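The murmur3 guard above is version-gated: only indices created on or after 2.0 reject `doc_values` and `index` in the mapping, so existing indices keep parsing. A standalone restatement of that idiom under stated assumptions (the `onOrAfter2x` flag and `"my_setting"` key are hypothetical stand-ins, and a plain `IllegalArgumentException` stands in for `MapperParsingException`):

```java
import java.util.Map;

final class VersionGateSketch {
    // Reject a setting only for newly created indices; older ones are grandfathered.
    static void rejectIfPresent(Map<String, Object> node, boolean onOrAfter2x, String fieldName) {
        if (onOrAfter2x && node.get("my_setting") != null) {
            throw new IllegalArgumentException(
                    "Setting [my_setting] cannot be modified for field [" + fieldName + "]");
        }
    }
}
```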
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java
index 8cccf0d6770..d042e1ea661 100644
--- a/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/core/NumberFieldMapper.java
@@ -34,13 +34,11 @@ import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.IndexableFieldType;
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.ByteArrayDataOutput;
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
@@ -53,7 +51,7 @@ import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.fielddata.IndexNumericFieldData;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeContext;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.internal.AllFieldMapper;
@@ -239,7 +237,7 @@ public abstract class NumberFieldMapper extends AbstractFieldM
         RuntimeException e = null;
         try {
             innerParseCreateField(context, fields);
-        } catch (IllegalArgumentException | ElasticsearchIllegalArgumentException e1) {
+        } catch (IllegalArgumentException e1) {
             e = e1;
         } catch (MapperParsingException e2) {
             e = e2;
@@ -276,8 +274,7 @@ public abstract class NumberFieldMapper extends AbstractFieldM

     @Override
     public final Query termQuery(Object value, @Nullable QueryParseContext context) {
-        TermQuery scoringQuery = new TermQuery(new Term(names.indexName(), indexedValueForSearch(value)));
-        return new ConstantScoreQuery(scoringQuery);
+        return new TermQuery(new Term(names.indexName(), indexedValueForSearch(value)));
     }

     @Override
@@ -370,12 +367,12 @@ public abstract class NumberFieldMapper extends AbstractFieldM
     }

     @Override
-    public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
-        super.merge(mergeWith, mergeContext);
+    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+        super.merge(mergeWith, mergeResult);
         if (!this.getClass().equals(mergeWith.getClass())) {
             return;
         }
-        if (!mergeContext.mergeFlags().simulate()) {
+        if (!mergeResult.simulate()) {
             NumberFieldMapper nfmMergeWith = (NumberFieldMapper) mergeWith;
             this.precisionStep = nfmMergeWith.precisionStep;
             this.includeInAll = nfmMergeWith.includeInAll;
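Two things fall out of removing `ElasticsearchIllegalArgumentException`: the multi-catch collapses to a single `IllegalArgumentException`, and numeric `termQuery` no longer wraps its `TermQuery` in a `ConstantScoreQuery`. A sketch contrasting the two query shapes with stock Lucene classes (the `"age"` field is illustrative):

```java
import org.apache.lucene.index.Term;
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.util.BytesRef;

final class TermQuerySketch {
    // Old shape: every matching doc scores the same, regardless of term statistics.
    static Query before(BytesRef indexedValue) {
        return new ConstantScoreQuery(new TermQuery(new Term("age", indexedValue)));
    }

    // New shape: a plain TermQuery, scored like any other term query.
    static Query after(BytesRef indexedValue) {
        return new TermQuery(new Term("age", indexedValue));
    }
}
```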
diff --git a/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java
index b16518769d1..a3a905aed99 100644
--- a/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/core/ShortFieldMapper.java
@@ -24,18 +24,19 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.IndexOptions;
+import org.apache.lucene.index.Terms;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.NumericUtils;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.action.fieldstats.FieldStats;
 import org.elasticsearch.common.Explicit;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Numbers;
 import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -46,7 +47,7 @@ import org.elasticsearch.index.fielddata.FieldDataType;
 import org.elasticsearch.index.fielddata.IndexNumericFieldData;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeContext;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.query.QueryParseContext;
@@ -211,7 +212,7 @@ public class ShortFieldMapper extends NumberFieldMapper {

     @Override
     public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
-        return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
                 lowerTerm == null ? null : parseValueAsInt(lowerTerm),
                 upperTerm == null ? null : parseValueAsInt(upperTerm),
                 includeLower, includeUpper));
@@ -219,7 +220,7 @@ public class ShortFieldMapper extends NumberFieldMapper {

     @Override
     public Filter rangeFilter(QueryParseContext parseContext, Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) {
-        return Queries.wrap(NumericRangeFieldDataFilter.newShortRange((IndexNumericFieldData) parseContext.getForField(this),
+        return new QueryWrapperFilter(NumericRangeFieldDataFilter.newShortRange((IndexNumericFieldData) parseContext.getForField(this),
                 lowerTerm == null ? null : parseValue(lowerTerm),
                 upperTerm == null ? null : parseValue(upperTerm),
                 includeLower, includeUpper));
@@ -230,7 +231,7 @@ public class ShortFieldMapper extends NumberFieldMapper {
         if (nullValue == null) {
             return null;
         }
-        return Queries.wrap(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
+        return new QueryWrapperFilter(NumericRangeQuery.newIntRange(names.indexName(), precisionStep,
                 nullValue.intValue(),
                 nullValue.intValue(),
                 true, true));
@@ -294,7 +295,7 @@ public class ShortFieldMapper extends NumberFieldMapper {
                 } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
                     boost = parser.floatValue();
                 } else {
-                    throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]");
+                    throw new IllegalArgumentException("unknown property [" + currentFieldName + "]");
                 }
             }
         }
@@ -326,12 +327,12 @@ public class ShortFieldMapper extends NumberFieldMapper {
     }

     @Override
-    public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
-        super.merge(mergeWith, mergeContext);
+    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+        super.merge(mergeWith, mergeResult);
         if (!this.getClass().equals(mergeWith.getClass())) {
             return;
         }
-        if (!mergeContext.mergeFlags().simulate()) {
+        if (!mergeResult.simulate()) {
             this.nullValue = ((ShortFieldMapper) mergeWith).nullValue;
             this.nullValueAsString = ((ShortFieldMapper) mergeWith).nullValueAsString;
         }
@@ -355,6 +356,15 @@ public class ShortFieldMapper extends NumberFieldMapper {
     }

+    @Override
+    public FieldStats stats(Terms terms, int maxDoc) throws IOException {
+        long minValue = NumericUtils.getMinInt(terms);
+        long maxValue = NumericUtils.getMaxInt(terms);
+        return new FieldStats.Long(
+            maxDoc, terms.getDocCount(), terms.getSumDocFreq(), terms.getSumTotalTermFreq(), minValue, maxValue
+        );
+    }
+
     public static class CustomShortNumericField extends CustomNumericField {

         private final short number;

diff --git a/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java
index 43da31336be..d5c4812b17d 100644
--- a/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/core/StringFieldMapper.java
@@ -26,7 +26,6 @@ import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
@@ -37,7 +36,7 @@ import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.fielddata.FieldDataType;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeContext;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.internal.AllFieldMapper;
@@ -339,7 +338,7 @@ public class StringFieldMapper extends AbstractFieldMapper implements Al
                 } else if ("boost".equals(currentFieldName) || "_boost".equals(currentFieldName)) {
                     boost = parser.floatValue();
                 } else {
-                    throw new ElasticsearchIllegalArgumentException("unknown property [" + currentFieldName + "]");
+                    throw new IllegalArgumentException("unknown property [" + currentFieldName + "]");
                 }
             }
         }
@@ -354,12 +353,12 @@ public class StringFieldMapper extends AbstractFieldMapper implements Al
     }

     @Override
-    public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
-        super.merge(mergeWith, mergeContext);
+    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+        super.merge(mergeWith, mergeResult);
         if (!this.getClass().equals(mergeWith.getClass())) {
             return;
         }
-        if (!mergeContext.mergeFlags().simulate()) {
+        if (!mergeResult.simulate()) {
             this.includeInAll = ((StringFieldMapper) mergeWith).includeInAll;
             this.nullValue = ((StringFieldMapper) mergeWith).nullValue;
             this.ignoreAbove = ((StringFieldMapper) mergeWith).ignoreAbove;

diff --git a/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java
index 1c3dfd617a3..1d4a727d5cf 100644
--- a/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapper.java
@@ -32,7 +32,7 @@ import org.elasticsearch.index.analysis.NamedAnalyzer;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeContext;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.core.StringFieldMapper.ValueAndBoost;
@@ -189,12 +189,12 @@ public class TokenCountFieldMapper extends IntegerFieldMapper {
     }

     @Override
-    public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
-        super.merge(mergeWith, mergeContext);
+    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+        super.merge(mergeWith, mergeResult);
         if (!this.getClass().equals(mergeWith.getClass())) {
             return;
         }
-        if (!mergeContext.mergeFlags().simulate()) {
+        if (!mergeResult.simulate()) {
             this.analyzer = ((TokenCountFieldMapper) mergeWith).analyzer;
         }
     }

diff --git a/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java b/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java
index cfed59be20f..20f9eda9b26 100644
--- a/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java
+++ b/src/main/java/org/elasticsearch/index/mapper/core/TypeParsers.java
@@ -34,6 +34,7 @@ import org.elasticsearch.index.mapper.DocumentMapperParser;
 import org.elasticsearch.index.mapper.FieldMapper.Loading;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.object.ObjectMapper;

 import java.util.ArrayList;
 import java.util.Collections;
@@ -332,6 +333,9 @@ public class TypeParsers {
             } else {
                 throw new MapperParsingException("No type specified for property [" + multiFieldName + "]");
             }
+            if (type.equals(ObjectMapper.CONTENT_TYPE) || type.equals(ObjectMapper.NESTED_CONTENT_TYPE)) {
+                throw new MapperParsingException("Type [" + type + "] cannot be used in multi field");
+            }

             Mapper.TypeParser typeParser = parserContext.typeParser(type);
             if (typeParser == null) {
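The `TypeParsers` hunk above closes a hole: a multi-field sub-field can only be a leaf, so `object` and `nested` types are now rejected by name before a type parser is even looked up. A standalone restatement of that guard under stated assumptions (the two string constants correspond to `ObjectMapper.CONTENT_TYPE` and `ObjectMapper.NESTED_CONTENT_TYPE`; a plain `IllegalArgumentException` stands in for `MapperParsingException`):

```java
final class MultiFieldTypeGuardSketch {
    // Multi-fields index the same value several ways; a sub-field that is
    // itself an object/nested mapper would have no single value to index.
    static void checkMultiFieldType(String type, String multiFieldName) {
        if ("object".equals(type) || "nested".equals(type)) {
            throw new IllegalArgumentException(
                    "Type [" + type + "] cannot be used in multi field [" + multiFieldName + "]");
        }
    }
}
```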
diff --git a/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java
index f7a39b2c952..6b7b10f69c6 100644
--- a/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapper.java
@@ -28,8 +28,6 @@ import org.apache.lucene.document.FieldType;
 import org.apache.lucene.index.DocValuesType;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
-import org.elasticsearch.ElasticsearchIllegalStateException;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
@@ -50,7 +48,7 @@ import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.FieldMapperListener;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeContext;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.ObjectMapperListener;
 import org.elasticsearch.index.mapper.ParseContext;
@@ -301,7 +299,7 @@ public class GeoPointFieldMapper extends AbstractFieldMapper implement
         public static final Encoding of(int numBytesPerValue) {
             final Encoding instance = INSTANCES[numBytesPerValue];
             if (instance == null) {
-                throw new ElasticsearchIllegalStateException("No encoding for " + numBytesPerValue + " bytes per value");
+                throw new IllegalStateException("No encoding for " + numBytesPerValue + " bytes per value");
             }
             return instance;
         }
@@ -593,12 +591,12 @@ public class GeoPointFieldMapper extends AbstractFieldMapper implement

         if (validateLat) {
             if (point.lat() > 90.0 || point.lat() < -90.0) {
-                throw new ElasticsearchIllegalArgumentException("illegal latitude value [" + point.lat() + "] for " + name());
+                throw new IllegalArgumentException("illegal latitude value [" + point.lat() + "] for " + name());
             }
         }
         if (validateLon) {
             if (point.lon() > 180.0 || point.lon() < -180) {
-                throw new ElasticsearchIllegalArgumentException("illegal longitude value [" + point.lon() + "] for " + name());
+                throw new IllegalArgumentException("illegal longitude value [" + point.lon() + "] for " + name());
             }
         }
@@ -643,39 +641,39 @@ public class GeoPointFieldMapper extends AbstractFieldMapper implement
     }

     @Override
-    public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
-        super.merge(mergeWith, mergeContext);
+    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+        super.merge(mergeWith, mergeResult);
         if (!this.getClass().equals(mergeWith.getClass())) {
             return;
         }
         GeoPointFieldMapper fieldMergeWith = (GeoPointFieldMapper) mergeWith;

         if (this.enableLatLon != fieldMergeWith.enableLatLon) {
-            mergeContext.addConflict("mapper [" + names.fullName() + "] has different lat_lon");
+            mergeResult.addConflict("mapper [" + names.fullName() + "] has different lat_lon");
         }
         if (this.enableGeoHash != fieldMergeWith.enableGeoHash) {
-            mergeContext.addConflict("mapper [" + names.fullName() + "] has different geohash");
+            mergeResult.addConflict("mapper [" + names.fullName() + "] has different geohash");
         }
         if (this.geoHashPrecision != fieldMergeWith.geoHashPrecision) {
-            mergeContext.addConflict("mapper [" + names.fullName() + "] has different geohash_precision");
+            mergeResult.addConflict("mapper [" + names.fullName() + "] has different geohash_precision");
         }
         if (this.enableGeohashPrefix != fieldMergeWith.enableGeohashPrefix) {
-            mergeContext.addConflict("mapper [" + names.fullName() + "] has different geohash_prefix");
+            mergeResult.addConflict("mapper [" + names.fullName() + "] has different geohash_prefix");
         }
         if (this.normalizeLat != fieldMergeWith.normalizeLat) {
-            mergeContext.addConflict("mapper [" + names.fullName() + "] has different normalize_lat");
+            mergeResult.addConflict("mapper [" + names.fullName() + "] has different normalize_lat");
         }
         if (this.normalizeLon != fieldMergeWith.normalizeLon) {
-            mergeContext.addConflict("mapper [" + names.fullName() + "] has different normalize_lon");
+            mergeResult.addConflict("mapper [" + names.fullName() + "] has different normalize_lon");
         }
         if (!Objects.equal(this.precisionStep, fieldMergeWith.precisionStep)) {
-            mergeContext.addConflict("mapper [" + names.fullName() + "] has different precision_step");
+            mergeResult.addConflict("mapper [" + names.fullName() + "] has different precision_step");
         }
         if (this.validateLat != fieldMergeWith.validateLat) {
-            mergeContext.addConflict("mapper [" + names.fullName() + "] has different validate_lat");
+            mergeResult.addConflict("mapper [" + names.fullName() + "] has different validate_lat");
         }
         if (this.validateLon != fieldMergeWith.validateLon) {
-            mergeContext.addConflict("mapper [" + names.fullName() + "] has different validate_lon");
+            mergeResult.addConflict("mapper [" + names.fullName() + "] has different validate_lon");
         }
     }

diff --git a/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java
index 979346767db..5a32182e1b3 100644
--- a/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapper.java
@@ -29,7 +29,6 @@ import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
 import org.apache.lucene.spatial.prefix.tree.PackedQuadPrefixTree;
 import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree;
 import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.geo.GeoUtils;
@@ -43,7 +42,7 @@ import org.elasticsearch.index.fielddata.FieldDataType;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeContext;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.core.AbstractFieldMapper;
@@ -168,7 +167,7 @@ public class GeoShapeFieldMapper extends AbstractFieldMapper {
                         .QUADTREE_LEVELS, false));
                 }
             } else {
-                throw new ElasticsearchIllegalArgumentException("Unknown prefix tree type [" + tree + "]");
+                throw new IllegalArgumentException("Unknown prefix tree type [" + tree + "]");
             }

             return new GeoShapeFieldMapper(names, prefixTree, strategyName, distanceErrorPct, orientation, fieldType,
@@ -281,10 +280,10 @@ public class GeoShapeFieldMapper extends AbstractFieldMapper {
     }

     @Override
-    public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
-        super.merge(mergeWith, mergeContext);
+    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
+        super.merge(mergeWith, mergeResult);
         if (!this.getClass().equals(mergeWith.getClass())) {
-            mergeContext.addConflict("mapper [" + names.fullName() + "] has different field type");
+            mergeResult.addConflict("mapper [" + names.fullName() + "] has different field type");
             return;
         }
         final GeoShapeFieldMapper fieldMergeWith = (GeoShapeFieldMapper) mergeWith;
@@ -292,7 +291,7 @@ public class GeoShapeFieldMapper extends AbstractFieldMapper {

         // prevent user from changing strategies
         if (!(this.defaultStrategy.getClass().equals(mergeWithStrategy.getClass()))) {
-            mergeContext.addConflict("mapper [" + names.fullName() + "] has different strategy");
+            mergeResult.addConflict("mapper [" + names.fullName() + "] has different strategy");
         }

         final SpatialPrefixTree grid = this.defaultStrategy.getGrid();
@@ -300,17 +299,17 @@ public class GeoShapeFieldMapper extends AbstractFieldMapper {

         // prevent user from changing trees (changes encoding)
         if (!grid.getClass().equals(mergeGrid.getClass())) {
-            mergeContext.addConflict("mapper [" + names.fullName() + "] has different tree");
+            mergeResult.addConflict("mapper [" + names.fullName() + "] has different tree");
         }

         // TODO we should allow this, but at the moment levels is used to build bookkeeping variables
         // in lucene's SpatialPrefixTree implementations, need a patch to correct that first
         if (grid.getMaxLevels() != mergeGrid.getMaxLevels()) {
-            mergeContext.addConflict("mapper [" + names.fullName() + "] has different tree_levels or precision");
+            mergeResult.addConflict("mapper [" + names.fullName() + "] has different tree_levels or precision");
         }

         // bail if there were merge conflicts
-        if (mergeContext.hasConflicts() || mergeContext.mergeFlags().simulate()) {
+        if (mergeResult.hasConflicts() || mergeResult.simulate()) {
             return;
         }
@@ -384,7 +383,7 @@ public class GeoShapeFieldMapper extends AbstractFieldMapper {
             if (SpatialStrategy.TERM.getStrategyName().equals(strategyName)) {
                 return termStrategy;
             }
-            throw new ElasticsearchIllegalArgumentException("Unknown prefix tree strategy [" + strategyName + "]");
+            throw new IllegalArgumentException("Unknown prefix tree strategy [" + strategyName + "]");
         }
     }
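The geo_shape merge above treats strategy, tree and tree levels as immutable because they determine how shapes were encoded at index time; note how it records every conflict first and only then bails, before mutating anything, if conflicts exist or the merge is a dry run. A self-contained restatement of that guard shape (the boolean parameters stand in for the real strategy/tree comparisons):

```java
import java.util.ArrayList;
import java.util.List;

final class GeoShapeMergeSketch {
    // Collect all conflicts, then apply nothing if any exist or if simulating.
    static List<String> merge(boolean sameStrategy, boolean sameTree, boolean sameLevels, boolean simulate) {
        List<String> conflicts = new ArrayList<>();
        if (!sameStrategy) conflicts.add("mapper [shape] has different strategy");
        if (!sameTree)     conflicts.add("mapper [shape] has different tree");
        if (!sameLevels)   conflicts.add("mapper [shape] has different tree_levels or precision");
        if (!conflicts.isEmpty() || simulate) {
            return conflicts; // bail: nothing is applied
        }
        // ...apply the non-conflicting updates here...
        return conflicts;
    }
}
```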
diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java
index 7fd2d5859ea..b412df7af30 100644
--- a/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/AllFieldMapper.java
@@ -39,7 +39,7 @@ import org.elasticsearch.index.fielddata.FieldDataType;
 import org.elasticsearch.index.mapper.InternalMapper;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeContext;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.RootMapper;
@@ -314,11 +314,11 @@ public class AllFieldMapper extends AbstractFieldMapper implements Inter
     }

     @Override
-    public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
         if (((AllFieldMapper)mergeWith).enabled() != this.enabled() && ((AllFieldMapper)mergeWith).enabledState != Defaults.ENABLED) {
-            mergeContext.addConflict("mapper [" + names.fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled());
+            mergeResult.addConflict("mapper [" + names.fullName() + "] enabled is " + this.enabled() + " now encountering "+ ((AllFieldMapper)mergeWith).enabled());
         }
-        super.merge(mergeWith, mergeContext);
+        super.merge(mergeWith, mergeResult);
     }

     @Override

diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java
index 33633d3c06d..2026579dc2f 100644
--- a/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java
+++ b/src/main/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapper.java
@@ -26,7 +26,6 @@ import org.apache.lucene.document.SortedSetDocValuesField;
 import org.apache.lucene.index.IndexOptions;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
@@ -37,7 +36,7 @@ import org.elasticsearch.index.fielddata.FieldDataType;
 import org.elasticsearch.index.mapper.InternalMapper;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeContext;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.RootMapper;
@@ -110,7 +109,7 @@ public class FieldNamesFieldMapper extends AbstractFieldMapper implement
         @Override
         public Mapper.Builder parse(String name, Map<String, Object> node, ParserContext parserContext) throws MapperParsingException {
             if (parserContext.indexVersionCreated().before(Version.V_1_3_0)) {
-                throw new ElasticsearchIllegalArgumentException("type="+CONTENT_TYPE+" is not supported on indices created before version 1.3.0. Is your cluster running multiple datanode versions?");
+                throw new IllegalArgumentException("type="+CONTENT_TYPE+" is not supported on indices created before version 1.3.0. Is your cluster running multiple datanode versions?");
             }

             FieldNamesFieldMapper.Builder builder = fieldNames();
@@ -140,7 +139,7 @@ public class FieldNamesFieldMapper extends AbstractFieldMapper implement
     }

     public FieldNamesFieldMapper(String name, String indexName, float boost, FieldType fieldType, EnabledAttributeMapper enabledState, @Nullable Settings fieldDataSettings, Settings indexSettings) {
-        super(new Names(name, indexName, indexName, name), boost, fieldType, null, Lucene.KEYWORD_ANALYZER,
+        super(new Names(name, indexName, indexName, name), boost, fieldType, false, Lucene.KEYWORD_ANALYZER,
                 Lucene.KEYWORD_ANALYZER, null, null, fieldDataSettings, indexSettings);
         this.defaultFieldType = Defaults.FIELD_TYPE;
         this.pre13Index = Version.indexCreated(indexSettings).before(Version.V_1_3_0);
@@ -241,9 +240,6 @@ public class FieldNamesFieldMapper extends AbstractFieldMapper implement
                 if (fieldType.indexOptions() != IndexOptions.NONE || fieldType.stored()) {
                     document.add(new Field(names().indexName(), fieldName, fieldType));
                 }
-                if (hasDocValues()) {
-                    document.add(new SortedSetDocValuesField(names().indexName(), new BytesRef(fieldName)));
-                }
             }
         }
     }
@@ -278,9 +274,9 @@ public class FieldNamesFieldMapper extends AbstractFieldMapper implement
     }

     @Override
-    public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
         FieldNamesFieldMapper fieldNamesMapperMergeWith = (FieldNamesFieldMapper)mergeWith;
-        if (!mergeContext.mergeFlags().simulate()) {
+        if (!mergeResult.simulate()) {
             if (fieldNamesMapperMergeWith.enabledState != enabledState && !fieldNamesMapperMergeWith.enabledState.unset()) {
                 this.enabledState = fieldNamesMapperMergeWith.enabledState;
             }
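For context on the `_field_names` hunks above: since 1.3 this mapper indexes the name of every non-empty field in a document, which is what exists/missing filters look up; dropping the `SortedSetDocValuesField` (and passing `false` for doc values in the constructor) leaves it index-only. A hedged sketch of the kind of query that consumes it, using only stock Lucene classes; the `"title"` field is illustrative:

```java
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

final class ExistsSketch {
    // Matches documents that have any value for "title": the mapper wrote
    // each non-empty field's name into _field_names at index time.
    static Query existsQuery() {
        return new TermQuery(new Term("_field_names", "title"));
    }
}
```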
(fieldType.indexOptions() != IndexOptions.NONE || context == null) { return super.termFilter(value, context); } - return Queries.wrap(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), value))); + return new QueryWrapperFilter(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), value))); } @Override @@ -208,7 +208,7 @@ public class IdFieldMapper extends AbstractFieldMapper implements Intern if (fieldType.indexOptions() != IndexOptions.NONE || context == null) { return super.termsFilter(values, context); } - return Queries.wrap(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), values))); + return new QueryWrapperFilter(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(context.queryTypes(), values))); } @Override @@ -238,7 +238,7 @@ public class IdFieldMapper extends AbstractFieldMapper implements Intern for (String queryType : queryTypes) { filter.add(new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(queryType, BytesRefs.toBytesRef(value)))), BooleanClause.Occur.SHOULD); } - return Queries.wrap(filter); + return new QueryWrapperFilter(filter); } @Override @@ -277,7 +277,7 @@ public class IdFieldMapper extends AbstractFieldMapper implements Intern filter.add(new RegexpQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(queryType, BytesRefs.toBytesRef(value))), flags, maxDeterminizedStates), BooleanClause.Occur.SHOULD); } - return Queries.wrap(filter); + return new QueryWrapperFilter(filter); } @Override @@ -361,7 +361,7 @@ public class IdFieldMapper extends AbstractFieldMapper implements Intern } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { // do nothing here, no merging, but also no exception } } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java index a530102e673..7f5249958ab 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/IndexFieldMapper.java @@ -34,7 +34,7 @@ import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperBuilders; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RootMapper; @@ -216,9 +216,9 @@ public class IndexFieldMapper extends AbstractFieldMapper implements Int } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { IndexFieldMapper indexFieldMapperMergeWith = (IndexFieldMapper) mergeWith; - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { if (indexFieldMapperMergeWith.enabledState != enabledState && !indexFieldMapperMergeWith.enabledState.unset()) { this.enabledState = indexFieldMapperMergeWith.enabledState; } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java index 
963001cafb2..d6a14f7be3c 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/ParentFieldMapper.java @@ -28,6 +28,7 @@ import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; @@ -44,7 +45,7 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RootMapper; @@ -275,7 +276,7 @@ public class ParentFieldMapper extends AbstractFieldMapper implements Inter } BytesRef bValue = BytesRefs.toBytesRef(value); if (Uid.hasDelimiter(bValue)) { - return Queries.wrap(new TermQuery(new Term(names.indexName(), bValue))); + return new QueryWrapperFilter(new TermQuery(new Term(names.indexName(), bValue))); } List types = new ArrayList<>(context.mapperService().types().size()); @@ -288,14 +289,14 @@ public class ParentFieldMapper extends AbstractFieldMapper implements Inter if (types.isEmpty()) { return Queries.newMatchNoDocsFilter(); } else if (types.size() == 1) { - return Queries.wrap(new TermQuery(new Term(names.indexName(), Uid.createUidAsBytes(types.get(0), bValue)))); + return new QueryWrapperFilter(new TermQuery(new Term(names.indexName(), Uid.createUidAsBytes(types.get(0), bValue)))); } else { // we use all non child types, cause we don't know if its exact or not... 
List typesValues = new ArrayList<>(types.size()); for (String type : context.mapperService().types()) { typesValues.add(Uid.createUidAsBytes(type, bValue)); } - return Queries.wrap(new TermsQuery(names.indexName(), typesValues)); + return new QueryWrapperFilter(new TermsQuery(names.indexName(), typesValues)); } } @@ -328,7 +329,7 @@ public class ParentFieldMapper extends AbstractFieldMapper implements Inter } } } - return Queries.wrap(new TermsQuery(names.indexName(), bValues)); + return new QueryWrapperFilter(new TermsQuery(names.indexName(), bValues)); } /** @@ -363,13 +364,13 @@ public class ParentFieldMapper extends AbstractFieldMapper implements Inter } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { ParentFieldMapper other = (ParentFieldMapper) mergeWith; if (!Objects.equal(type, other.type)) { - mergeContext.addConflict("The _parent field's type option can't be changed: [" + type + "]->[" + other.type + "]"); + mergeResult.addConflict("The _parent field's type option can't be changed: [" + type + "]->[" + other.type + "]"); } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { ParentFieldMapper fieldMergeWith = (ParentFieldMapper) mergeWith; if (fieldMergeWith.customFieldDataSettings != null) { if (!Objects.equal(fieldMergeWith.customFieldDataSettings, this.customFieldDataSettings)) { diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java index 8aee69f8ba3..3631a3f68f9 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/RoutingFieldMapper.java @@ -33,7 +33,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RootMapper; @@ -242,7 +242,7 @@ public class RoutingFieldMapper extends AbstractFieldMapper implements I } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { // do nothing here, no merging, but also no exception } } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/SizeFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/SizeFieldMapper.java index bd954a8b756..7acc52f41cc 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/SizeFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/SizeFieldMapper.java @@ -28,7 +28,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import 
org.elasticsearch.index.mapper.RootMapper; @@ -175,9 +175,9 @@ public class SizeFieldMapper extends IntegerFieldMapper implements RootMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { SizeFieldMapper sizeFieldMapperMergeWith = (SizeFieldMapper) mergeWith; - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { if (sizeFieldMapperMergeWith.enabledState != enabledState && !sizeFieldMapperMergeWith.enabledState.unset()) { this.enabledState = sizeFieldMapperMergeWith.enabledState; } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java index 3814ba41ee6..e1315d3e0c4 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/SourceFieldMapper.java @@ -26,6 +26,7 @@ import org.apache.lucene.document.StoredField; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -166,7 +167,7 @@ public class SourceFieldMapper extends AbstractFieldMapper implements In } else if ("format".equals(fieldName)) { builder.format(nodeStringValue(fieldNode, null)); iterator.remove(); - } else if (fieldName.equals("includes")) { + } else if (fieldName.equals("includes") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { List values = (List) fieldNode; String[] includes = new String[values.size()]; for (int i = 0; i < includes.length; i++) { @@ -174,7 +175,7 @@ public class SourceFieldMapper extends AbstractFieldMapper implements In } builder.includes(includes); iterator.remove(); - } else if (fieldName.equals("excludes")) { + } else if (fieldName.equals("excludes") && parserContext.indexVersionCreated().before(Version.V_2_0_0)) { List values = (List) fieldNode; String[] excludes = new String[values.size()]; for (int i = 0; i < excludes.length; i++) { @@ -417,9 +418,9 @@ public class SourceFieldMapper extends AbstractFieldMapper implements In } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { SourceFieldMapper sourceMergeWith = (SourceFieldMapper) mergeWith; - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { if (sourceMergeWith.compress != null) { this.compress = sourceMergeWith.compress; } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java index 10a14169755..62c0beff862 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/TTLFieldMapper.java @@ -33,7 +33,7 @@ import org.elasticsearch.index.AlreadyExpiredException; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; 
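The recurring change in the hunks above and below is mechanical: every `merge(Mapper, MergeContext)` override becomes `merge(Mapper, MergeResult)`, and the two-step `mergeContext.mergeFlags().simulate()` collapses to `mergeResult.simulate()`. A minimal, self-contained sketch of that pattern follows; the `MergeResult` class here is a stand-in modeled only on the calls visible in these hunks (`simulate()`, `addConflict()`), not the actual Elasticsearch class, and `EnabledFlagMapper` is invented for illustration.

```java
import java.util.ArrayList;
import java.util.List;

// Stand-in for the new MergeResult type: it carries the simulate flag directly
// (formerly mergeContext.mergeFlags().simulate()) and collects conflict
// messages via addConflict(). Modeled on the calls visible in these hunks;
// not the actual Elasticsearch class.
final class MergeResult {
    private final boolean simulate;
    private final List<String> conflicts = new ArrayList<>();

    MergeResult(boolean simulate) {
        this.simulate = simulate;
    }

    boolean simulate() {
        return simulate;
    }

    void addConflict(String conflict) {
        conflicts.add(conflict);
    }

    List<String> conflicts() {
        return conflicts;
    }
}

// The merge pattern the hunks keep repeating: record conflicts on the result,
// and only mutate mapper state when the merge is not a simulation.
// EnabledFlagMapper is invented for illustration.
final class EnabledFlagMapper {
    private boolean enabled;

    EnabledFlagMapper(boolean enabled) {
        this.enabled = enabled;
    }

    void merge(EnabledFlagMapper mergeWith, MergeResult mergeResult) {
        if (this.enabled && !mergeWith.enabled) {
            mergeResult.addConflict("enabled cannot be disabled once it was enabled");
        } else if (!mergeResult.simulate()) {
            this.enabled = mergeWith.enabled;
        }
    }

    public static void main(String[] args) {
        EnabledFlagMapper existing = new EnabledFlagMapper(true);
        MergeResult dryRun = new MergeResult(true); // simulate first, as mapping updates do
        existing.merge(new EnabledFlagMapper(false), dryRun);
        System.out.println(dryRun.conflicts()); // [enabled cannot be disabled once it was enabled]
    }
}
```

Running the merge once with `simulate == true` and inspecting the conflict list before committing anything is exactly why the TTL and timestamp mappers below guard their state changes with `!mergeResult.simulate()`.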
import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RootMapper; @@ -238,13 +238,13 @@ public class TTLFieldMapper extends LongFieldMapper implements InternalMapper, R } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { TTLFieldMapper ttlMergeWith = (TTLFieldMapper) mergeWith; if (((TTLFieldMapper) mergeWith).enabledState != Defaults.ENABLED_STATE) {//only do something if actually something was set for the document mapper that we merge with if (this.enabledState == EnabledAttributeMapper.ENABLED && ((TTLFieldMapper) mergeWith).enabledState == EnabledAttributeMapper.DISABLED) { - mergeContext.addConflict("_ttl cannot be disabled once it was enabled."); + mergeResult.addConflict("_ttl cannot be disabled once it was enabled."); } else { - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { this.enabledState = ttlMergeWith.enabledState; } } @@ -252,7 +252,7 @@ public class TTLFieldMapper extends LongFieldMapper implements InternalMapper, R if (ttlMergeWith.defaultTTL != -1) { // we never build the default when the field is disabled so we should also not set it // (it does not make a difference though as everything that is not build in toXContent will also not be set in the cluster) - if (!mergeContext.mergeFlags().simulate() && (enabledState == EnabledAttributeMapper.ENABLED)) { + if (!mergeResult.simulate() && (enabledState == EnabledAttributeMapper.ENABLED)) { this.defaultTTL = ttlMergeWith.defaultTTL; } } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java index 18075ff3866..160c5c03699 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/TimestampFieldMapper.java @@ -35,7 +35,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RootMapper; @@ -352,10 +352,10 @@ public class TimestampFieldMapper extends DateFieldMapper implements InternalMap } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { TimestampFieldMapper timestampFieldMapperMergeWith = (TimestampFieldMapper) mergeWith; - super.merge(mergeWith, mergeContext); - if (!mergeContext.mergeFlags().simulate()) { + super.merge(mergeWith, mergeResult); + if (!mergeResult.simulate()) { if (timestampFieldMapperMergeWith.enabledState != enabledState && !timestampFieldMapperMergeWith.enabledState.unset()) { this.enabledState = timestampFieldMapperMergeWith.enabledState; } @@ -364,18 +364,18 @@ public class TimestampFieldMapper extends DateFieldMapper implements InternalMap return; } if (defaultTimestamp == null) { - mergeContext.addConflict("Cannot update default in _timestamp value. 
Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); + mergeResult.addConflict("Cannot update default in _timestamp value. Value is null now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); } else if (timestampFieldMapperMergeWith.defaultTimestamp() == null) { - mergeContext.addConflict("Cannot update default in _timestamp value. Value is \" + defaultTimestamp.toString() + \" now encountering null"); + mergeResult.addConflict("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering null"); } else if (!timestampFieldMapperMergeWith.defaultTimestamp().equals(defaultTimestamp)) { - mergeContext.addConflict("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); + mergeResult.addConflict("Cannot update default in _timestamp value. Value is " + defaultTimestamp.toString() + " now encountering " + timestampFieldMapperMergeWith.defaultTimestamp()); } if (this.path != null) { if (path.equals(timestampFieldMapperMergeWith.path()) == false) { - mergeContext.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ? "missing" : timestampFieldMapperMergeWith.path())); + mergeResult.addConflict("Cannot update path in _timestamp value. Value is " + path + " path in merged mapping is " + (timestampFieldMapperMergeWith.path() == null ? "missing" : timestampFieldMapperMergeWith.path())); } } else if (timestampFieldMapperMergeWith.path() != null) { - mergeContext.addConflict("Cannot update path in _timestamp value. 
Value is " + path + " path in merged mapping is missing"); } } } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java index 206cc3a8c3c..b5668761c03 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/TypeFieldMapper.java @@ -28,20 +28,20 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.PrefixQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.lucene.Lucene; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.RootMapper; @@ -133,15 +133,15 @@ public class TypeFieldMapper extends AbstractFieldMapper implements Inte @Override public Query termQuery(Object value, @Nullable QueryParseContext context) { - return new ConstantScoreQuery(context.cacheFilter(termFilter(value, context), null, context.autoFilterCachePolicy())); + return new ConstantScoreQuery(termFilter(value, context)); } @Override public Filter termFilter(Object value, @Nullable QueryParseContext context) { if (fieldType.indexOptions() == IndexOptions.NONE) { - return Queries.wrap(new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.typePrefixAsBytes(BytesRefs.toBytesRef(value))))); + return new QueryWrapperFilter(new PrefixQuery(new Term(UidFieldMapper.NAME, Uid.typePrefixAsBytes(BytesRefs.toBytesRef(value))))); } - return Queries.wrap(new TermQuery(names().createIndexNameTerm(BytesRefs.toBytesRef(value)))); + return new QueryWrapperFilter(new TermQuery(names().createIndexNameTerm(BytesRefs.toBytesRef(value)))); } @Override @@ -210,7 +210,7 @@ public class TypeFieldMapper extends AbstractFieldMapper implements Inte } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { // do nothing here, no merging, but also no exception } } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java index 0ae2b497593..01744d37d3b 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/UidFieldMapper.java @@ -35,7 +35,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import 
org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext.Document; @@ -228,7 +228,7 @@ public class UidFieldMapper extends AbstractFieldMapper implements Internal } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { // do nothing here, no merging, but also no exception } } diff --git a/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java index 4ae3eaa415a..cb143e877a8 100644 --- a/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/internal/VersionFieldMapper.java @@ -24,14 +24,13 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.document.NumericDocValuesField; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.ParseContext.Document; @@ -163,7 +162,7 @@ public class VersionFieldMapper extends AbstractFieldMapper implements Int } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { // nothing to do } diff --git a/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java b/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java index 23d373c65ab..64ba3bace25 100644 --- a/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/ip/IpFieldMapper.java @@ -28,15 +28,14 @@ import org.apache.lucene.index.IndexOptions; import org.apache.lucene.search.Filter; import org.apache.lucene.search.NumericRangeQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.NumericUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Explicit; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -48,7 +47,7 @@ import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.mapper.Mapper; import 
org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MergeContext; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.MergeMappingException; import org.elasticsearch.index.mapper.ParseContext; import org.elasticsearch.index.mapper.core.LongFieldMapper.CustomLongNumericField; @@ -83,22 +82,22 @@ public class IpFieldMapper extends NumberFieldMapper { private static final Pattern pattern = Pattern.compile("\\."); - public static long ipToLong(String ip) throws ElasticsearchIllegalArgumentException { + public static long ipToLong(String ip) { try { if (!InetAddresses.isInetAddress(ip)) { - throw new ElasticsearchIllegalArgumentException("failed to parse ip [" + ip + "], not a valid ip address"); + throw new IllegalArgumentException("failed to parse ip [" + ip + "], not a valid ip address"); } String[] octets = pattern.split(ip); if (octets.length != 4) { - throw new ElasticsearchIllegalArgumentException("failed to parse ip [" + ip + "], not a valid ipv4 address (4 dots)"); + throw new IllegalArgumentException("failed to parse ip [" + ip + "], not a valid ipv4 address (4 dots)"); } return (Long.parseLong(octets[0]) << 24) + (Integer.parseInt(octets[1]) << 16) + (Integer.parseInt(octets[2]) << 8) + Integer.parseInt(octets[3]); } catch (Exception e) { - if (e instanceof ElasticsearchIllegalArgumentException) { - throw (ElasticsearchIllegalArgumentException) e; + if (e instanceof IllegalArgumentException) { + throw (IllegalArgumentException) e; } - throw new ElasticsearchIllegalArgumentException("failed to parse ip [" + ip + "]", e); + throw new IllegalArgumentException("failed to parse ip [" + ip + "]", e); } } @@ -235,7 +234,7 @@ public class IpFieldMapper extends NumberFieldMapper { long iSim; try { iSim = ipToLong(fuzziness.asString()); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { iSim = fuzziness.asLong(); } return NumericRangeQuery.newLongRange(names.indexName(), precisionStep, @@ -254,7 +253,7 @@ public class IpFieldMapper extends NumberFieldMapper { @Override public Filter rangeFilter(Object lowerTerm, Object upperTerm, boolean includeLower, boolean includeUpper, @Nullable QueryParseContext context) { - return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, lowerTerm == null ? null : parseValue(lowerTerm), upperTerm == null ? 
null : parseValue(upperTerm), includeLower, includeUpper)); @@ -274,7 +273,7 @@ public class IpFieldMapper extends NumberFieldMapper { return null; } final long value = ipToLong(nullValue); - return Queries.wrap(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, + return new QueryWrapperFilter(NumericRangeQuery.newLongRange(names.indexName(), precisionStep, value, value, true, true)); @@ -320,12 +319,12 @@ public class IpFieldMapper extends NumberFieldMapper { } @Override - public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException { - super.merge(mergeWith, mergeContext); + public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException { + super.merge(mergeWith, mergeResult); if (!this.getClass().equals(mergeWith.getClass())) { return; } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { this.nullValue = ((IpFieldMapper) mergeWith).nullValue; } } diff --git a/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java b/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java index 08888e9aea2..d8a14abbacc 100644 --- a/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java +++ b/src/main/java/org/elasticsearch/index/mapper/object/DynamicTemplate.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.mapper.object; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.index.mapper.ContentPath; @@ -46,7 +45,7 @@ public class DynamicTemplate { } else if ("regex".equals(value)) { return REGEX; } - throw new ElasticsearchIllegalArgumentException("No matching pattern matched on [" + value + "]"); + throw new IllegalArgumentException("No matching pattern matched on [" + value + "]"); } } diff --git a/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java b/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java index fab309081ab..8004bb38e7c 100644 --- a/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/object/ObjectMapper.java @@ -20,44 +20,31 @@ package org.elasticsearch.index.mapper.object; import com.google.common.collect.Iterables; - -import org.apache.lucene.document.Field; -import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.Term; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.CopyOnWriteHashMap; -import org.elasticsearch.common.joda.FormatDateTimeFormatter; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; -import org.elasticsearch.index.mapper.FieldMapper; import 
org.elasticsearch.index.mapper.FieldMapperListener; import org.elasticsearch.index.mapper.InternalMapper; import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperBuilders; import org.elasticsearch.index.mapper.MapperParsingException; -import org.elasticsearch.index.mapper.MapperUtils; -import org.elasticsearch.index.mapper.MergeContext; import org.elasticsearch.index.mapper.MergeMappingException; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ObjectMapperListener; -import org.elasticsearch.index.mapper.ParseContext; -import org.elasticsearch.index.mapper.ParseContext.Document; -import org.elasticsearch.index.mapper.StrictDynamicMappingException; import org.elasticsearch.index.mapper.internal.AllFieldMapper; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; -import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.settings.IndexSettings; import java.io.IOException; @@ -74,15 +61,7 @@ import java.util.TreeMap; import static com.google.common.collect.Lists.newArrayList; import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; -import static org.elasticsearch.index.mapper.MapperBuilders.binaryField; -import static org.elasticsearch.index.mapper.MapperBuilders.booleanField; -import static org.elasticsearch.index.mapper.MapperBuilders.dateField; -import static org.elasticsearch.index.mapper.MapperBuilders.doubleField; -import static org.elasticsearch.index.mapper.MapperBuilders.floatField; -import static org.elasticsearch.index.mapper.MapperBuilders.integerField; -import static org.elasticsearch.index.mapper.MapperBuilders.longField; import static org.elasticsearch.index.mapper.MapperBuilders.object; -import static org.elasticsearch.index.mapper.MapperBuilders.stringField; import static org.elasticsearch.index.mapper.core.TypeParsers.parsePathType; /** @@ -262,7 +241,7 @@ public class ObjectMapper implements Mapper, AllFieldMapper.IncludeInAll, Clonea builder.pathType(parsePathType(name, fieldNode.toString())); return true; } - return false; + return false; } protected static void parseNested(String name, Map node, ObjectMapper.Builder builder) { @@ -388,7 +367,7 @@ public class ObjectMapper implements Mapper, AllFieldMapper.IncludeInAll, Clonea } this.nestedTypePathAsString = "__" + fullPath; this.nestedTypePathAsBytes = new BytesRef(nestedTypePathAsString); - this.nestedTypeFilter = Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, nestedTypePathAsBytes))); + this.nestedTypeFilter = new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, nestedTypePathAsBytes))); } @Override @@ -418,6 +397,18 @@ public class ObjectMapper implements Mapper, AllFieldMapper.IncludeInAll, Clonea return this.name; } + public boolean isEnabled() { + return this.enabled; + } + + public ContentPath.Type pathType() { + return pathType; + } + + public Mapper getMapper(String field) { + return mappers.get(field); + } + @Override public void includeInAll(Boolean includeInAll) { if (includeInAll == null) { @@ -500,451 +491,36 @@ public class ObjectMapper implements Mapper, AllFieldMapper.IncludeInAll, Clonea } public final Dynamic dynamic() { - return this.dynamic == null ? 
Dynamic.TRUE : this.dynamic; - } - - public void setDynamic(Dynamic dynamic) { - this.dynamic = dynamic; - } - - protected boolean allowValue() { - return true; + return dynamic; } @Override - public ObjectMapper parse(ParseContext context) throws IOException { - if (!enabled) { - context.parser().skipChildren(); - return null; - } - XContentParser parser = context.parser(); - - String currentFieldName = parser.currentName(); - XContentParser.Token token = parser.currentToken(); - if (token == XContentParser.Token.VALUE_NULL) { - // the object is null ("obj1" : null), simply bail - return null; - } - - if (token.isValue() && !allowValue()) { - // if we are parsing an object but it is just a value, its only allowed on root level parsers with there - // is a field name with the same name as the type - throw new MapperParsingException("object mapping for [" + name + "] tried to parse field [" + currentFieldName + "] as object, but found a concrete value"); - } - - if (nested.isNested()) { - context = context.createNestedContext(fullPath); - Document nestedDoc = context.doc(); - Document parentDoc = nestedDoc.getParent(); - // pre add the uid field if possible (id was already provided) - IndexableField uidField = parentDoc.getField(UidFieldMapper.NAME); - if (uidField != null) { - // we don't need to add it as a full uid field in nested docs, since we don't need versioning - // we also rely on this for UidField#loadVersion - - // this is a deeply nested field - nestedDoc.add(new Field(UidFieldMapper.NAME, uidField.stringValue(), UidFieldMapper.Defaults.NESTED_FIELD_TYPE)); - } - // the type of the nested doc starts with __, so we can identify that its a nested one in filters - // note, we don't prefix it with the type of the doc since it allows us to execute a nested query - // across types (for example, with similar nested objects) - nestedDoc.add(new Field(TypeFieldMapper.NAME, nestedTypePathAsString, TypeFieldMapper.Defaults.FIELD_TYPE)); - } - - ContentPath.Type origPathType = context.path().pathType(); - context.path().pathType(pathType); - - // if we are at the end of the previous object, advance - if (token == XContentParser.Token.END_OBJECT) { - token = parser.nextToken(); - } - if (token == XContentParser.Token.START_OBJECT) { - // if we are just starting an OBJECT, advance, this is the object we are parsing, we need the name first - token = parser.nextToken(); - } - - ObjectMapper update = null; - while (token != XContentParser.Token.END_OBJECT) { - ObjectMapper newUpdate = null; - if (token == XContentParser.Token.START_OBJECT) { - newUpdate = serializeObject(context, currentFieldName); - } else if (token == XContentParser.Token.START_ARRAY) { - newUpdate = serializeArray(context, currentFieldName); - } else if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.VALUE_NULL) { - serializeNullValue(context, currentFieldName); - } else if (token == null) { - throw new MapperParsingException("object mapping for [" + name + "] tried to parse field [" + currentFieldName + "] as object, but got EOF, has a concrete value been provided to it?"); - } else if (token.isValue()) { - newUpdate = serializeValue(context, currentFieldName, token); - } - token = parser.nextToken(); - if (newUpdate != null) { - if (update == null) { - update = newUpdate; - } else { - MapperUtils.merge(update, newUpdate); - } - } - } - // restore the enable path flag - context.path().pathType(origPathType); - if (nested.isNested()) { - 
Document nestedDoc = context.doc(); - Document parentDoc = nestedDoc.getParent(); - if (nested.isIncludeInParent()) { - for (IndexableField field : nestedDoc.getFields()) { - if (field.name().equals(UidFieldMapper.NAME) || field.name().equals(TypeFieldMapper.NAME)) { - continue; - } else { - parentDoc.add(field); - } - } - } - if (nested.isIncludeInRoot()) { - Document rootDoc = context.rootDoc(); - // don't add it twice, if its included in parent, and we are handling the master doc... - if (!nested.isIncludeInParent() || parentDoc != rootDoc) { - for (IndexableField field : nestedDoc.getFields()) { - if (field.name().equals(UidFieldMapper.NAME) || field.name().equals(TypeFieldMapper.NAME)) { - continue; - } else { - rootDoc.add(field); - } - } - } - } - } - return update; - } - - private void serializeNullValue(ParseContext context, String lastFieldName) throws IOException { - // we can only handle null values if we have mappings for them - Mapper mapper = mappers.get(lastFieldName); - if (mapper != null) { - if (mapper instanceof FieldMapper) { - if (!((FieldMapper) mapper).supportsNullValue()) { - throw new MapperParsingException("no object mapping found for null value in [" + lastFieldName + "]"); - } - } - mapper.parse(context); - } else if (dynamic == Dynamic.STRICT) { - throw new StrictDynamicMappingException(fullPath, lastFieldName); - } - } - - private ObjectMapper serializeObject(final ParseContext context, String currentFieldName) throws IOException { - if (currentFieldName == null) { - throw new MapperParsingException("object mapping [" + name + "] trying to serialize an object with no field associated with it, current value [" + context.parser().textOrNull() + "]"); - } - context.path().add(currentFieldName); - - ObjectMapper update = null; - Mapper objectMapper = mappers.get(currentFieldName); - if (objectMapper != null) { - final Mapper subUpdate = objectMapper.parse(context); - if (subUpdate != null) { - // propagate mapping update - update = mappingUpdate(subUpdate); - } - } else { - Dynamic dynamic = this.dynamic; - if (dynamic == null) { - dynamic = context.root().dynamic(); - } - if (dynamic == Dynamic.STRICT) { - throw new StrictDynamicMappingException(fullPath, currentFieldName); - } else if (dynamic == Dynamic.TRUE) { - // remove the current field name from path, since template search and the object builder add it as well... - context.path().remove(); - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "object"); - if (builder == null) { - builder = MapperBuilders.object(currentFieldName).enabled(true).pathType(pathType); - // if this is a non root object, then explicitly set the dynamic behavior if set - if (!(this instanceof RootObjectMapper) && this.dynamic != Defaults.DYNAMIC) { - ((Builder) builder).dynamic(this.dynamic); - } - } - BuilderContext builderContext = new BuilderContext(context.indexSettings(), context.path()); - objectMapper = builder.build(builderContext); - context.path().add(currentFieldName); - update = mappingUpdate(MapperUtils.parseAndMergeUpdate(objectMapper, context)); - } else { - // not dynamic, read everything up to end object - context.parser().skipChildren(); - } - } - - context.path().remove(); - return update; - } - - private ObjectMapper serializeArray(ParseContext context, String lastFieldName) throws IOException { - String arrayFieldName = lastFieldName; - Mapper mapper = mappers.get(lastFieldName); - if (mapper != null) { - // There is a concrete mapper for this field already. 
Need to check if the mapper - // expects an array, if so we pass the context straight to the mapper and if not - // we serialize the array components - if (mapper instanceof ArrayValueMapperParser) { - final Mapper subUpdate = mapper.parse(context); - if (subUpdate != null) { - // propagate the mapping update - return mappingUpdate(subUpdate); - } else { - return null; - } - } else { - return serializeNonDynamicArray(context, lastFieldName, arrayFieldName); - } - } else { - - Dynamic dynamic = this.dynamic; - if (dynamic == null) { - dynamic = context.root().dynamic(); - } - if (dynamic == Dynamic.STRICT) { - throw new StrictDynamicMappingException(fullPath, arrayFieldName); - } else if (dynamic == Dynamic.TRUE) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, arrayFieldName, "object"); - if (builder == null) { - return serializeNonDynamicArray(context, lastFieldName, arrayFieldName); - } - BuilderContext builderContext = new BuilderContext(context.indexSettings(), context.path()); - mapper = builder.build(builderContext); - if (mapper != null && mapper instanceof ArrayValueMapperParser) { - context.path().add(arrayFieldName); - mapper = MapperUtils.parseAndMergeUpdate(mapper, context); - return mappingUpdate(mapper); - } else { - return serializeNonDynamicArray(context, lastFieldName, arrayFieldName); - } - } else { - return serializeNonDynamicArray(context, lastFieldName, arrayFieldName); - } - } - } - - private ObjectMapper serializeNonDynamicArray(ParseContext context, String lastFieldName, String arrayFieldName) throws IOException { - XContentParser parser = context.parser(); - XContentParser.Token token; - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - if (token == XContentParser.Token.START_OBJECT) { - return serializeObject(context, lastFieldName); - } else if (token == XContentParser.Token.START_ARRAY) { - return serializeArray(context, lastFieldName); - } else if (token == XContentParser.Token.FIELD_NAME) { - lastFieldName = parser.currentName(); - } else if (token == XContentParser.Token.VALUE_NULL) { - serializeNullValue(context, lastFieldName); - } else if (token == null) { - throw new MapperParsingException("object mapping for [" + name + "] with array for [" + arrayFieldName + "] tried to parse as array, but got EOF, is there a mismatch in types for the same field?"); - } else { - return serializeValue(context, lastFieldName, token); - } - } - return null; - } - - private ObjectMapper serializeValue(final ParseContext context, String currentFieldName, XContentParser.Token token) throws IOException { - if (currentFieldName == null) { - throw new MapperParsingException("object mapping [" + name + "] trying to serialize a value with no field associated with it, current value [" + context.parser().textOrNull() + "]"); - } - Mapper mapper = mappers.get(currentFieldName); - if (mapper != null) { - Mapper subUpdate = mapper.parse(context); - if (subUpdate == null) { - return null; - } - return mappingUpdate(subUpdate); - } else { - return parseDynamicValue(context, currentFieldName, token); - } - } - - public ObjectMapper parseDynamicValue(final ParseContext context, String currentFieldName, XContentParser.Token token) throws IOException { - Dynamic dynamic = this.dynamic; - if (dynamic == null) { - dynamic = context.root().dynamic(); - } - if (dynamic == Dynamic.STRICT) { - throw new StrictDynamicMappingException(fullPath, currentFieldName); - } - if (dynamic == Dynamic.FALSE) { - return null; - } - Mapper mapper = null; - 
BuilderContext builderContext = new BuilderContext(context.indexSettings(), context.path()); - if (token == XContentParser.Token.VALUE_STRING) { - boolean resolved = false; - - // do a quick test to see if its fits a dynamic template, if so, use it. - // we need to do it here so we can handle things like attachment templates, where calling - // text (to see if its a date) causes the binary value to be cleared - if (!resolved) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string", null); - if (builder != null) { - mapper = builder.build(builderContext); - resolved = true; - } - } - - if (!resolved && context.root().dateDetection()) { - String text = context.parser().text(); - // a safe check since "1" gets parsed as well - if (Strings.countOccurrencesOf(text, ":") > 1 || Strings.countOccurrencesOf(text, "-") > 1 || Strings.countOccurrencesOf(text, "/") > 1) { - for (FormatDateTimeFormatter dateTimeFormatter : context.root().dynamicDateTimeFormatters()) { - try { - dateTimeFormatter.parser().parseMillis(text); - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "date"); - if (builder == null) { - builder = dateField(currentFieldName).dateTimeFormatter(dateTimeFormatter); - } - mapper = builder.build(builderContext); - resolved = true; - break; - } catch (Exception e) { - // failure to parse this, continue - } - } - } - } - if (!resolved && context.root().numericDetection()) { - String text = context.parser().text(); - try { - Long.parseLong(text); - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long"); - if (builder == null) { - builder = longField(currentFieldName); - } - mapper = builder.build(builderContext); - resolved = true; - } catch (Exception e) { - // not a long number - } - if (!resolved) { - try { - Double.parseDouble(text); - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double"); - if (builder == null) { - builder = doubleField(currentFieldName); - } - mapper = builder.build(builderContext); - resolved = true; - } catch (Exception e) { - // not a long number - } - } - } - if (!resolved) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "string"); - if (builder == null) { - builder = stringField(currentFieldName); - } - mapper = builder.build(builderContext); - } - } else if (token == XContentParser.Token.VALUE_NUMBER) { - XContentParser.NumberType numberType = context.parser().numberType(); - if (numberType == XContentParser.NumberType.INT) { - if (context.parser().estimatedNumberType()) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long"); - if (builder == null) { - builder = longField(currentFieldName); - } - mapper = builder.build(builderContext); - } else { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "integer"); - if (builder == null) { - builder = integerField(currentFieldName); - } - mapper = builder.build(builderContext); - } - } else if (numberType == XContentParser.NumberType.LONG) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "long"); - if (builder == null) { - builder = longField(currentFieldName); - } - mapper = builder.build(builderContext); - } else if (numberType == XContentParser.NumberType.FLOAT) { - if (context.parser().estimatedNumberType()) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, 
currentFieldName, "double"); - if (builder == null) { - builder = doubleField(currentFieldName); - } - mapper = builder.build(builderContext); - } else { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "float"); - if (builder == null) { - builder = floatField(currentFieldName); - } - mapper = builder.build(builderContext); - } - } else if (numberType == XContentParser.NumberType.DOUBLE) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "double"); - if (builder == null) { - builder = doubleField(currentFieldName); - } - mapper = builder.build(builderContext); - } - } else if (token == XContentParser.Token.VALUE_BOOLEAN) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "boolean"); - if (builder == null) { - builder = booleanField(currentFieldName); - } - mapper = builder.build(builderContext); - } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, "binary"); - if (builder == null) { - builder = binaryField(currentFieldName); - } - mapper = builder.build(builderContext); - } else { - Mapper.Builder builder = context.root().findTemplateBuilder(context, currentFieldName, null); - if (builder != null) { - mapper = builder.build(builderContext); - } else { - // TODO how do we identify dynamically that its a binary value? - throw new ElasticsearchIllegalStateException("Can't handle serializing a dynamic type with content token [" + token + "] and field name [" + currentFieldName + "]"); - } - } - - mapper = MapperUtils.parseAndMergeUpdate(mapper, context); - - ObjectMapper update = null; - if (mapper != null) { - update = mappingUpdate(mapper); - } - return update; - } - - @Override - public void merge(final Mapper mergeWith, final MergeContext mergeContext) throws MergeMappingException { + public void merge(final Mapper mergeWith, final MergeResult mergeResult) throws MergeMappingException { if (!(mergeWith instanceof ObjectMapper)) { - mergeContext.addConflict("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]"); + mergeResult.addConflict("Can't merge a non object mapping [" + mergeWith.name() + "] with an object mapping [" + name() + "]"); return; } ObjectMapper mergeWithObject = (ObjectMapper) mergeWith; if (nested().isNested()) { if (!mergeWithObject.nested().isNested()) { - mergeContext.addConflict("object mapping [" + name() + "] can't be changed from nested to non-nested"); + mergeResult.addConflict("object mapping [" + name() + "] can't be changed from nested to non-nested"); return; } } else { if (mergeWithObject.nested().isNested()) { - mergeContext.addConflict("object mapping [" + name() + "] can't be changed from non-nested to nested"); + mergeResult.addConflict("object mapping [" + name() + "] can't be changed from non-nested to nested"); return; } } - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { if (mergeWithObject.dynamic != null) { this.dynamic = mergeWithObject.dynamic; } } - doMerge(mergeWithObject, mergeContext); + doMerge(mergeWithObject, mergeResult); List mappersToPut = new ArrayList<>(); FieldMapperListener.Aggregator newFieldMappers = new FieldMapperListener.Aggregator(); @@ -954,20 +530,20 @@ public class ObjectMapper implements Mapper, AllFieldMapper.IncludeInAll, Clonea Mapper mergeIntoMapper = mappers.get(mergeWithMapper.name()); if (mergeIntoMapper == null) { // no 
mapping, simply add it if not simulating - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { mappersToPut.add(mergeWithMapper); mergeWithMapper.traverse(newFieldMappers); mergeWithMapper.traverse(newObjectMappers); } } else { - mergeIntoMapper.merge(mergeWithMapper, mergeContext); + mergeIntoMapper.merge(mergeWithMapper, mergeResult); } } if (!newFieldMappers.mappers.isEmpty()) { - mergeContext.addFieldMappers(newFieldMappers.mappers); + mergeResult.addFieldMappers(newFieldMappers.mappers); } if (!newObjectMappers.mappers.isEmpty()) { - mergeContext.addObjectMappers(newObjectMappers.mappers); + mergeResult.addObjectMappers(newObjectMappers.mappers); } // add the mappers only after the administration have been done, so it will not be visible to parser (which first try to read with no lock) for (Mapper mapper : mappersToPut) { @@ -975,7 +551,7 @@ public class ObjectMapper implements Mapper, AllFieldMapper.IncludeInAll, Clonea } } - protected void doMerge(ObjectMapper mergeWith, MergeContext mergeContext) { + protected void doMerge(ObjectMapper mergeWith, MergeResult mergeResult) { } diff --git a/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java b/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java index 56d2b96429c..e4f5a8d7a03 100644 --- a/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java +++ b/src/main/java/org/elasticsearch/index/mapper/object/RootObjectMapper.java @@ -255,14 +255,9 @@ public class RootObjectMapper extends ObjectMapper { } @Override - protected boolean allowValue() { - return true; - } - - @Override - protected void doMerge(ObjectMapper mergeWith, MergeContext mergeContext) { + protected void doMerge(ObjectMapper mergeWith, MergeResult mergeResult) { RootObjectMapper mergeWithObject = (RootObjectMapper) mergeWith; - if (!mergeContext.mergeFlags().simulate()) { + if (!mergeResult.simulate()) { // merge them List mergedTemplates = Lists.newArrayList(Arrays.asList(this.dynamicTemplates)); for (DynamicTemplate template : mergeWithObject.dynamicTemplates) { diff --git a/src/main/java/org/elasticsearch/index/merge/policy/AbstractMergePolicyProvider.java b/src/main/java/org/elasticsearch/index/merge/policy/AbstractMergePolicyProvider.java index c3aa5e16606..d304d24cdbe 100644 --- a/src/main/java/org/elasticsearch/index/merge/policy/AbstractMergePolicyProvider.java +++ b/src/main/java/org/elasticsearch/index/merge/policy/AbstractMergePolicyProvider.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.merge.policy; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.TieredMergePolicy; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.index.shard.AbstractIndexShardComponent; import org.elasticsearch.index.store.Store; @@ -46,11 +45,11 @@ public abstract class AbstractMergePolicyProvider extend try { double value = Double.parseDouble(noCFSRatio); if (value < 0.0 || value > 1.0) { - throw new ElasticsearchIllegalArgumentException("NoCFSRatio must be in the interval [0..1] but was: [" + value + "]"); + throw new IllegalArgumentException("NoCFSRatio must be in the interval [0..1] but was: [" + value + "]"); } return value; } catch (NumberFormatException ex) { - throw new ElasticsearchIllegalArgumentException("Expected a boolean or a value in the interval [0..1] but was: [" + noCFSRatio + "]", ex); + throw new IllegalArgumentException("Expected a boolean or a value in the interval [0..1] but was: [" + noCFSRatio + "]", ex); } 
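Alongside the mapper changes, this part of the diff retires `ElasticsearchIllegalArgumentException` (and the custom `Preconditions` and `ElasticsearchException` imports) in favor of the stock JDK types: `ipToLong`, `DynamicTemplate`, and the merge-policy ratio parser above now all throw plain `java.lang.IllegalArgumentException`. A standalone rendering of the ratio-validation pattern, assuming only what the hunk shows (the "true"/"false" string handling that lives elsewhere in `parseNoCFSRatio` is deliberately elided, and the class name is invented):

```java
// Standalone rendering of the validation pattern after the exception cleanup:
// plain java.lang.IllegalArgumentException replaces the removed
// ElasticsearchIllegalArgumentException. Mirrors the numeric branch of the
// noCFSRatio hunk above.
public final class NoCfsRatio {

    public static double parse(String noCFSRatio) {
        try {
            double value = Double.parseDouble(noCFSRatio);
            if (value < 0.0 || value > 1.0) {
                throw new IllegalArgumentException(
                        "NoCFSRatio must be in the interval [0..1] but was: [" + value + "]");
            }
            return value;
        } catch (NumberFormatException ex) {
            // Preserve the original parse failure as the cause, as the hunk does.
            throw new IllegalArgumentException(
                    "Expected a boolean or a value in the interval [0..1] but was: [" + noCFSRatio + "]", ex);
        }
    }

    public static void main(String[] args) {
        System.out.println(parse("0.5")); // 0.5
        try {
            parse("1.5");
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // NoCFSRatio must be in the interval [0..1] but was: [1.5]
        }
    }
}
```

Using the JDK exception keeps callers' catch sites working unchanged, since the old class already extended the same hierarchy of runtime exceptions; the `ipToLong` hunk above shows the matching caller-side change (`catch (IllegalArgumentException e)`).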
} } diff --git a/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java b/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java index 26b3ec39b91..d3107fa33d0 100644 --- a/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java +++ b/src/main/java/org/elasticsearch/index/merge/policy/LogByteSizeMergePolicyProvider.java @@ -19,9 +19,8 @@ package org.elasticsearch.index.merge.policy; +import com.google.common.base.Preconditions; import org.apache.lucene.index.LogByteSizeMergePolicy; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -71,7 +70,7 @@ public class LogByteSizeMergePolicyProvider extends AbstractMergePolicyProvider< } @Override - public void close() throws ElasticsearchException { + public void close() { indexSettingsService.removeListener(applySettings); } diff --git a/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java b/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java index b5949086206..a46c0f0aa9c 100644 --- a/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java +++ b/src/main/java/org/elasticsearch/index/merge/policy/LogDocMergePolicyProvider.java @@ -19,9 +19,8 @@ package org.elasticsearch.index.merge.policy; +import com.google.common.base.Preconditions; import org.apache.lucene.index.LogDocMergePolicy; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.settings.IndexSettingsService; @@ -64,7 +63,7 @@ public class LogDocMergePolicyProvider extends AbstractMergePolicyProvider cache = new CloseableThreadLocal() { @Override protected QueryParseContext initialValue() { - return new QueryParseContext(shardId.index(), queryParserService, true); + return new QueryParseContext(shardId.index(), queryParserService); } }; @@ -223,7 +221,7 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple context.setMapUnmappedFieldAsString(mapUnmappedFieldsAsString ? true : false); return queryParserService.parseInnerQuery(context); } catch (IOException e) { - throw new QueryParsingException(queryParserService.index(), "Failed to parse", e); + throw new QueryParsingException(context, "Failed to parse", e); } finally { if (type != null) { QueryParseContext.setTypes(previousTypes); @@ -280,13 +278,7 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple shard.refresh("percolator_load_queries"); // Maybe add a mode load? This isn't really a write. 
We need write b/c state=post_recovery try (Engine.Searcher searcher = shard.acquireSearcher("percolator_load_queries", true)) { - Query query = new ConstantScoreQuery( - indexCache.filter().cache( - Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME))), - null, - queryParserService.autoFilterCachePolicy() - ) - ); + Query query = new TermQuery(new Term(TypeFieldMapper.NAME, PercolatorService.TYPE_NAME)); QueriesLoaderCollector queryCollector = new QueriesLoaderCollector(PercolatorQueriesRegistry.this, logger, mapperService, indexFieldDataService); searcher.searcher().search(query, queryCollector); Map queries = queryCollector.queries(); @@ -345,15 +337,5 @@ public class PercolatorQueriesRegistry extends AbstractIndexShardComponent imple removePercolateQuery(delete.id()); } } - - // Updating the live percolate queries for a delete by query is tricky with the current way delete by queries - // are handled. It is only possible if we put a big lock around the post delete by query hook... - - // If we implement delete by query, that just runs a query and generates delete operations in a bulk, then - // updating the live percolator is automatically supported for delete by query. -// @Override -// public void postDeleteByQuery(Engine.DeleteByQuery deleteByQuery) { -// } } - } diff --git a/src/main/java/org/elasticsearch/index/query/AndFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/AndFilterBuilder.java index c69f7c8ef0f..87611a03b73 100644 --- a/src/main/java/org/elasticsearch/index/query/AndFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/AndFilterBuilder.java @@ -34,9 +34,6 @@ public class AndFilterBuilder extends BaseFilterBuilder { private ArrayList filters = Lists.newArrayList(); - private Boolean cache; - private String cacheKey; - private String filterName; public AndFilterBuilder(FilterBuilder... filters) { @@ -53,19 +50,6 @@ public class AndFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public AndFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public AndFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - /** * Sets the filter name for the filter that can be used when searching for matched_filters per hit. 
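// Sketch of the simplified percolator load path in the hunk above: instead of
// a cached, ConstantScore-wrapped filter, registered percolator documents are
// now found with a plain TermQuery on the type field. "_type" is what
// TypeFieldMapper.NAME resolves to and ".percolator" is
// PercolatorService.TYPE_NAME in this codebase; both literals are assumptions
// here, not shown in the hunk.
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

final class PercolatorQueryLoad {
    static Query allPercolatorDocsQuery() {
        return new TermQuery(new Term("_type", ".percolator"));
    }
}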
*/ @@ -82,12 +66,6 @@ public class AndFilterBuilder extends BaseFilterBuilder { filter.toXContent(builder, params); } builder.endArray(); - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } if (filterName != null) { builder.field("_name", filterName); } diff --git a/src/main/java/org/elasticsearch/index/query/AndFilterParser.java b/src/main/java/org/elasticsearch/index/query/AndFilterParser.java index 176a8c6dd7b..f0c8c2724bb 100644 --- a/src/main/java/org/elasticsearch/index/query/AndFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/AndFilterParser.java @@ -22,10 +22,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -56,9 +54,6 @@ public class AndFilterParser implements FilterParser { ArrayList filters = newArrayList(); boolean filtersFound = false; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; - String filterName = null; String currentFieldName = null; XContentParser.Token token = parser.currentToken(); @@ -74,6 +69,8 @@ public class AndFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_ARRAY) { if ("filters".equals(currentFieldName)) { filtersFound = true; @@ -93,21 +90,17 @@ public class AndFilterParser implements FilterParser { } } } else if (token.isValue()) { - if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_name".equals(currentFieldName)) { + if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[and] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[and] filter does not support [" + currentFieldName + "]"); } } } } if (!filtersFound) { - throw new QueryParsingException(parseContext.index(), "[and] filter requires 'filters' to be set on it'"); + throw new QueryParsingException(parseContext, "[and] filter requires 'filters' to be set on it'"); } if (filters.isEmpty()) { @@ -120,10 +113,7 @@ public class AndFilterParser implements FilterParser { for (Filter filter : filters) { boolQuery.add(filter, Occur.MUST); } - Filter filter = Queries.wrap(boolQuery); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } + Filter filter = new QueryWrapperFilter(boolQuery); if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/BaseFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/BaseFilterBuilder.java index ccd2b3aa723..69264fb4b45 100644 --- 
a/src/main/java/org/elasticsearch/index/query/BaseFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/BaseFilterBuilder.java @@ -45,12 +45,12 @@ public abstract class BaseFilterBuilder implements FilterBuilder { } @Override - public BytesReference buildAsBytes() throws ElasticsearchException { + public BytesReference buildAsBytes() { return buildAsBytes(XContentType.JSON); } @Override - public BytesReference buildAsBytes(XContentType contentType) throws ElasticsearchException { + public BytesReference buildAsBytes(XContentType contentType) { try { XContentBuilder builder = XContentFactory.contentBuilder(contentType); toXContent(builder, EMPTY_PARAMS); diff --git a/src/main/java/org/elasticsearch/index/query/BaseQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/BaseQueryBuilder.java index 6cb13150740..daa61f9d6e5 100644 --- a/src/main/java/org/elasticsearch/index/query/BaseQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/BaseQueryBuilder.java @@ -45,12 +45,12 @@ public abstract class BaseQueryBuilder implements QueryBuilder { } @Override - public BytesReference buildAsBytes() throws ElasticsearchException { + public BytesReference buildAsBytes() { return buildAsBytes(XContentType.JSON); } @Override - public BytesReference buildAsBytes(XContentType contentType) throws ElasticsearchException { + public BytesReference buildAsBytes(XContentType contentType) { try { XContentBuilder builder = XContentFactory.contentBuilder(contentType); toXContent(builder, EMPTY_PARAMS); diff --git a/src/main/java/org/elasticsearch/index/query/BoolFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/BoolFilterBuilder.java index f4982f12f69..330adaf8d08 100644 --- a/src/main/java/org/elasticsearch/index/query/BoolFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/BoolFilterBuilder.java @@ -36,9 +36,6 @@ public class BoolFilterBuilder extends BaseFilterBuilder { private ArrayList shouldClauses = new ArrayList<>(); - private Boolean cache; - private String cacheKey; - private String filterName; /** @@ -114,19 +111,6 @@ public class BoolFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. 
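// Why the throws clauses above can simply be dropped: ElasticsearchException
// is a RuntimeException, so declaring it on buildAsBytes() never forced any
// caller to handle it. A minimal standalone illustration follows; the names
// are illustrative, not the real API.
class UncheckedFailure extends RuntimeException {
    UncheckedFailure(String message) { super(message); }
}

interface PayloadBuilder {
    // No "throws UncheckedFailure" needed: unchecked exceptions propagate
    // whether or not they are declared, so the clause was pure noise.
    byte[] buildAsBytes();
}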
- */ - public BoolFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public BoolFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject("bool"); @@ -137,12 +121,6 @@ public class BoolFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } diff --git a/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java b/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java index fcd2e68c8b4..7d96f1bc3bc 100644 --- a/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/BoolFilterParser.java @@ -22,10 +22,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -52,9 +50,6 @@ public class BoolFilterParser implements FilterParser { BooleanQuery boolFilter = new BooleanQuery(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; - String filterName = null; String currentFieldName = null; XContentParser.Token token; @@ -64,6 +59,8 @@ public class BoolFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("must".equals(currentFieldName)) { hasAnyFilter = true; @@ -85,7 +82,7 @@ public class BoolFilterParser implements FilterParser { boolFilter.add(new BooleanClause(filter, BooleanClause.Occur.SHOULD)); } } else { - throw new QueryParsingException(parseContext.index(), "[bool] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[bool] filter does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { if ("must".equals(currentFieldName)) { @@ -114,23 +111,19 @@ public class BoolFilterParser implements FilterParser { } } } else { - throw new QueryParsingException(parseContext.index(), "[bool] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[bool] filter does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { - if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_name".equals(currentFieldName)) { + if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[bool] filter does not support [" + currentFieldName + "]"); + 
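// Sketch of the combination path the and/bool filter parsers now share, as a
// standalone helper: clauses go into a Lucene BooleanQuery and the result is
// wrapped once in QueryWrapperFilter, with no caching decision at parse time.
// In the Lucene version this patch targets, Filter extends Query, which is why
// filters can be added to a BooleanQuery directly, as the hunks above do.
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.QueryWrapperFilter;

import java.util.List;

final class CombinedFilters {
    static Filter must(List<Filter> filters) {
        BooleanQuery bool = new BooleanQuery();
        for (Filter filter : filters) {
            bool.add(filter, BooleanClause.Occur.MUST); // [and] semantics: every clause must match
        }
        return new QueryWrapperFilter(bool);
    }
}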
throw new QueryParsingException(parseContext, "[bool] filter does not support [" + currentFieldName + "]"); } } } if (!hasAnyFilter) { - throw new QueryParsingException(parseContext.index(), "[bool] filter has no inner should/must/must_not elements"); + throw new QueryParsingException(parseContext, "[bool] filter has no inner should/must/must_not elements"); } if (boolFilter.clauses().isEmpty()) { @@ -138,10 +131,7 @@ public class BoolFilterParser implements FilterParser { return null; } - Filter filter = Queries.wrap(boolFilter); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } + Filter filter = new QueryWrapperFilter(boolFilter); if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java b/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java index 29d4ba2edd5..b7c31647c94 100644 --- a/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/BoolQueryParser.java @@ -85,7 +85,7 @@ public class BoolQueryParser implements QueryParser { clauses.add(new BooleanClause(query, BooleanClause.Occur.SHOULD)); } } else { - throw new QueryParsingException(parseContext.index(), "[bool] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[bool] query does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { if ("must".equals(currentFieldName)) { @@ -110,7 +110,7 @@ public class BoolQueryParser implements QueryParser { } } } else { - throw new QueryParsingException(parseContext.index(), "bool query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "bool query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("disable_coord".equals(currentFieldName) || "disableCoord".equals(currentFieldName)) { @@ -126,7 +126,7 @@ public class BoolQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[bool] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[bool] query does not support [" + currentFieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java index aaf36915577..fef3cd30734 100644 --- a/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -74,13 +73,13 @@ public class BoostingQueryBuilder extends BaseQueryBuilder implements BoostableQ @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { if (positiveQuery == null) { - throw new ElasticsearchIllegalArgumentException("boosting query requires positive query to be set"); + throw new IllegalArgumentException("boosting query requires positive query to be set"); } if (negativeQuery == null) { - throw new ElasticsearchIllegalArgumentException("boosting query requires negative query to be set"); + throw new IllegalArgumentException("boosting query requires negative 
query to be set"); } if (negativeBoost == -1) { - throw new ElasticsearchIllegalArgumentException("boosting query requires negativeBoost to be set"); + throw new IllegalArgumentException("boosting query requires negativeBoost to be set"); } builder.startObject(BoostingQueryParser.NAME); builder.field("positive"); diff --git a/src/main/java/org/elasticsearch/index/query/BoostingQueryParser.java b/src/main/java/org/elasticsearch/index/query/BoostingQueryParser.java index a117256ece1..c160b2f9a4a 100644 --- a/src/main/java/org/elasticsearch/index/query/BoostingQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/BoostingQueryParser.java @@ -66,7 +66,7 @@ public class BoostingQueryParser implements QueryParser { negativeQuery = parseContext.parseInnerQuery(); negativeQueryFound = true; } else { - throw new QueryParsingException(parseContext.index(), "[boosting] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[boosting] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("negative_boost".equals(currentFieldName) || "negativeBoost".equals(currentFieldName)) { @@ -74,19 +74,19 @@ public class BoostingQueryParser implements QueryParser { } else if ("boost".equals(currentFieldName)) { boost = parser.floatValue(); } else { - throw new QueryParsingException(parseContext.index(), "[boosting] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[boosting] query does not support [" + currentFieldName + "]"); } } } if (positiveQuery == null && !positiveQueryFound) { - throw new QueryParsingException(parseContext.index(), "[boosting] query requires 'positive' query to be set'"); + throw new QueryParsingException(parseContext, "[boosting] query requires 'positive' query to be set'"); } if (negativeQuery == null && !negativeQueryFound) { - throw new QueryParsingException(parseContext.index(), "[boosting] query requires 'negative' query to be set'"); + throw new QueryParsingException(parseContext, "[boosting] query requires 'negative' query to be set'"); } if (negativeBoost == -1) { - throw new QueryParsingException(parseContext.index(), "[boosting] query requires 'negative_boost' to be set'"); + throw new QueryParsingException(parseContext, "[boosting] query requires 'negative_boost' to be set'"); } // parsers returned null diff --git a/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java index 16cb0bf7e57..3e313ce8408 100644 --- a/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java @@ -19,7 +19,9 @@ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -73,10 +75,10 @@ public class CommonTermsQueryBuilder extends BaseQueryBuilder implements Boostab */ public CommonTermsQueryBuilder(String name, Object text) { if (name == null) { - throw new ElasticsearchIllegalArgumentException("Field name must not be null"); + throw new IllegalArgumentException("Field name must not be null"); } if (text == null) { - throw new ElasticsearchIllegalArgumentException("Query must not be null"); + throw new 
IllegalArgumentException("Query must not be null"); } this.text = text; this.name = name; diff --git a/src/main/java/org/elasticsearch/index/query/CommonTermsQueryParser.java b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryParser.java index 46997ffb7fd..29945de5686 100644 --- a/src/main/java/org/elasticsearch/index/query/CommonTermsQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/CommonTermsQueryParser.java @@ -28,7 +28,6 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRefBuilder; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.FieldMapper; @@ -66,7 +65,7 @@ public class CommonTermsQueryParser implements QueryParser { XContentParser parser = parseContext.parser(); XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[common] query malformed, no field"); + throw new QueryParsingException(parseContext, "[common] query malformed, no field"); } String fieldName = parser.currentName(); Object value = null; @@ -97,12 +96,13 @@ public class CommonTermsQueryParser implements QueryParser { } else if ("high_freq".equals(innerFieldName) || "highFreq".equals(innerFieldName)) { highFreqMinimumShouldMatch = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[common] query does not support [" + innerFieldName + "] for [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[common] query does not support [" + innerFieldName + + "] for [" + currentFieldName + "]"); } } } } else { - throw new QueryParsingException(parseContext.index(), "[common] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[common] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("query".equals(currentFieldName)) { @@ -110,7 +110,7 @@ public class CommonTermsQueryParser implements QueryParser { } else if ("analyzer".equals(currentFieldName)) { String analyzer = parser.text(); if (parseContext.analysisService().analyzer(analyzer) == null) { - throw new QueryParsingException(parseContext.index(), "[common] analyzer [" + parser.text() + "] not found"); + throw new QueryParsingException(parseContext, "[common] analyzer [" + parser.text() + "] not found"); } queryAnalyzer = analyzer; } else if ("disable_coord".equals(currentFieldName) || "disableCoord".equals(currentFieldName)) { @@ -124,7 +124,7 @@ public class CommonTermsQueryParser implements QueryParser { } else if ("and".equalsIgnoreCase(op)) { highFreqOccur = BooleanClause.Occur.MUST; } else { - throw new QueryParsingException(parseContext.index(), + throw new QueryParsingException(parseContext, "[common] query requires operator to be either 'and' or 'or', not [" + op + "]"); } } else if ("low_freq_operator".equals(currentFieldName) || "lowFreqOperator".equals(currentFieldName)) { @@ -134,7 +134,7 @@ public class CommonTermsQueryParser implements QueryParser { } else if ("and".equalsIgnoreCase(op)) { lowFreqOccur = BooleanClause.Occur.MUST; } else { - throw new QueryParsingException(parseContext.index(), + throw new QueryParsingException(parseContext, "[common] query requires operator to be either 'and' or 'or', not [" + op + 
"]"); } } else if ("minimum_should_match".equals(currentFieldName) || "minimumShouldMatch".equals(currentFieldName)) { @@ -144,7 +144,7 @@ public class CommonTermsQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[common] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[common] query does not support [" + currentFieldName + "]"); } } } @@ -155,13 +155,13 @@ public class CommonTermsQueryParser implements QueryParser { token = parser.nextToken(); if (token != XContentParser.Token.END_OBJECT) { throw new QueryParsingException( - parseContext.index(), + parseContext, "[common] query parsed in simplified form, with direct field name, but included more options than just the field name, possibly use its 'options' form, with 'query' element?"); } } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No text specified for text query"); + throw new QueryParsingException(parseContext, "No text specified for text query"); } FieldMapper mapper = null; String field; @@ -187,7 +187,7 @@ public class CommonTermsQueryParser implements QueryParser { } else { analyzer = parseContext.mapperService().analysisService().analyzer(queryAnalyzer); if (analyzer == null) { - throw new ElasticsearchIllegalArgumentException("No analyzer found for [" + queryAnalyzer + "]"); + throw new IllegalArgumentException("No analyzer found for [" + queryAnalyzer + "]"); } } diff --git a/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java b/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java index 78c5879b63f..593643abc52 100644 --- a/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryParser.java @@ -22,10 +22,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -55,14 +53,14 @@ public class ConstantScoreQueryParser implements QueryParser { Query query = null; boolean queryFound = false; float boost = 1.0f; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String currentFieldName = null; XContentParser.Token token; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("filter".equals(currentFieldName)) { filter = parseContext.parseInnerFilter(); @@ -71,22 +69,18 @@ public class ConstantScoreQueryParser implements QueryParser { query = parseContext.parseInnerQuery(); queryFound = true; } else { - throw new QueryParsingException(parseContext.index(), "[constant_score] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[constant_score] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("boost".equals(currentFieldName)) { 
boost = parser.floatValue(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[constant_score] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[constant_score] query does not support [" + currentFieldName + "]"); } } } if (!filterFound && !queryFound) { - throw new QueryParsingException(parseContext.index(), "[constant_score] requires either 'filter' or 'query' element"); + throw new QueryParsingException(parseContext, "[constant_score] requires either 'filter' or 'query' element"); } if (query == null && filter == null) { @@ -94,11 +88,6 @@ public class ConstantScoreQueryParser implements QueryParser { } if (filter != null) { - // cache the filter if possible needed - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } - Query query1 = new ConstantScoreQuery(filter); query1.setBoost(boost); return query1; diff --git a/src/main/java/org/elasticsearch/index/query/DisMaxQueryParser.java b/src/main/java/org/elasticsearch/index/query/DisMaxQueryParser.java index 82feb9854a5..2747387fbd7 100644 --- a/src/main/java/org/elasticsearch/index/query/DisMaxQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/DisMaxQueryParser.java @@ -70,7 +70,7 @@ public class DisMaxQueryParser implements QueryParser { queries.add(query); } } else { - throw new QueryParsingException(parseContext.index(), "[dis_max] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[dis_max] query does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { if ("queries".equals(currentFieldName)) { @@ -83,7 +83,7 @@ public class DisMaxQueryParser implements QueryParser { token = parser.nextToken(); } } else { - throw new QueryParsingException(parseContext.index(), "[dis_max] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[dis_max] query does not support [" + currentFieldName + "]"); } } else { if ("boost".equals(currentFieldName)) { @@ -93,13 +93,13 @@ public class DisMaxQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[dis_max] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[dis_max] query does not support [" + currentFieldName + "]"); } } } if (!queriesFound) { - throw new QueryParsingException(parseContext.index(), "[dis_max] requires 'queries' field"); + throw new QueryParsingException(parseContext, "[dis_max] requires 'queries' field"); } if (queries.isEmpty()) { diff --git a/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java b/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java index eb03586adf2..8dc0a3eb2c1 100644 --- a/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/ExistsFilterParser.java @@ -24,7 +24,6 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryWrapperFilter; -import org.apache.lucene.search.TermRangeFilter; import 
org.apache.lucene.search.TermRangeQuery; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.HashedBytesRef; @@ -71,13 +70,13 @@ public class ExistsFilterParser implements FilterParser { } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[exists] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[exists] filter does not support [" + currentFieldName + "]"); } } } if (fieldPattern == null) { - throw new QueryParsingException(parseContext.index(), "exists must be provided with a [field]"); + throw new QueryParsingException(parseContext, "exists must be provided with a [field]"); } return newFilter(parseContext, fieldPattern, filterName); @@ -122,11 +121,7 @@ public class ExistsFilterParser implements FilterParser { boolFilter.add(filter, BooleanClause.Occur.SHOULD); } - Filter filter = Queries.wrap(boolFilter); - // we always cache this one, really does not change... (exists) - // its ok to cache under the fieldName cacheKey, since its per segment and the mapping applies to this data on this segment... - filter = parseContext.cacheFilter(filter, new HashedBytesRef("$exists$" + fieldPattern), parseContext.autoFilterCachePolicy()); - + Filter filter = new QueryWrapperFilter(boolFilter); if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java b/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java index cb821912ca9..b349dd3e65c 100644 --- a/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/FQueryFilterParser.java @@ -21,10 +21,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -52,8 +50,6 @@ public class FQueryFilterParser implements FilterParser { Query query = null; boolean queryFound = false; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String filterName = null; String currentFieldName = null; @@ -61,35 +57,30 @@ public class FQueryFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("query".equals(currentFieldName)) { queryFound = true; query = parseContext.parseInnerQuery(); } else { - throw new QueryParsingException(parseContext.index(), "[fquery] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[fquery] filter does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.autoFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || 
"_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[fquery] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[fquery] filter does not support [" + currentFieldName + "]"); } } } if (!queryFound) { - throw new QueryParsingException(parseContext.index(), "[fquery] requires 'query' element"); + throw new QueryParsingException(parseContext, "[fquery] requires 'query' element"); } if (query == null) { return null; } - Filter filter = Queries.wrap(query, parseContext); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } + Filter filter = new QueryWrapperFilter(query); if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryParser.java b/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryParser.java index 2b69cf61561..1e8fd7cfa03 100644 --- a/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryParser.java @@ -64,11 +64,12 @@ public class FieldMaskingSpanQueryParser implements QueryParser { if ("query".equals(currentFieldName)) { Query query = parseContext.parseInnerQuery(); if (!(query instanceof SpanQuery)) { - throw new QueryParsingException(parseContext.index(), "[field_masking_span] query] must be of type span query"); + throw new QueryParsingException(parseContext, "[field_masking_span] query] must be of type span query"); } inner = (SpanQuery) query; } else { - throw new QueryParsingException(parseContext.index(), "[field_masking_span] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[field_masking_span] query does not support [" + + currentFieldName + "]"); } } else { if ("boost".equals(currentFieldName)) { @@ -78,15 +79,15 @@ public class FieldMaskingSpanQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[field_masking_span] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[field_masking_span] query does not support [" + currentFieldName + "]"); } } } if (inner == null) { - throw new QueryParsingException(parseContext.index(), "field_masking_span must have [query] span query clause"); + throw new QueryParsingException(parseContext, "field_masking_span must have [query] span query clause"); } if (field == null) { - throw new QueryParsingException(parseContext.index(), "field_masking_span must have [field] set for it"); + throw new QueryParsingException(parseContext, "field_masking_span must have [field] set for it"); } FieldMapper mapper = parseContext.fieldMapper(field); diff --git a/src/main/java/org/elasticsearch/index/query/FilterBuilder.java b/src/main/java/org/elasticsearch/index/query/FilterBuilder.java index 50d0b3673fb..77eb4d136ca 100644 --- a/src/main/java/org/elasticsearch/index/query/FilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/FilterBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentType; @@ -29,8 +28,8 @@ 
import org.elasticsearch.common.xcontent.XContentType; */ public interface FilterBuilder extends ToXContent { - BytesReference buildAsBytes() throws ElasticsearchException; + BytesReference buildAsBytes(); - BytesReference buildAsBytes(XContentType contentType) throws ElasticsearchException; + BytesReference buildAsBytes(XContentType contentType); } diff --git a/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java b/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java index e1e27eec64b..9e2f8e133b3 100644 --- a/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/FilteredQueryParser.java @@ -23,9 +23,7 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Filter; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; @@ -55,8 +53,6 @@ public class FilteredQueryParser implements QueryParser { Filter filter = null; boolean filterFound = false; float boost = 1.0f; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String queryName = null; String currentFieldName = null; @@ -66,6 +62,8 @@ public class FilteredQueryParser implements QueryParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("query".equals(currentFieldName)) { query = parseContext.parseInnerQuery(); @@ -73,7 +71,7 @@ public class FilteredQueryParser implements QueryParser { filterFound = true; filter = parseContext.parseInnerFilter(); } else { - throw new QueryParsingException(parseContext.index(), "[filtered] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[filtered] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("strategy".equals(currentFieldName)) { @@ -93,18 +91,14 @@ public class FilteredQueryParser implements QueryParser { } else if ("leap_frog_filter_first".equals(value) || "leapFrogFilterFirst".equals(value)) { filterStrategy = FilteredQuery.LEAP_FROG_FILTER_FIRST_STRATEGY; } else { - throw new QueryParsingException(parseContext.index(), "[filtered] strategy value not supported [" + value + "]"); + throw new QueryParsingException(parseContext, "[filtered] strategy value not supported [" + value + "]"); } } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else if ("boost".equals(currentFieldName)) { boost = parser.floatValue(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[filtered] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[filtered] query does not support [" + currentFieldName + "]"); } } } @@ -129,11 +123,6 @@ public class FilteredQueryParser implements 
QueryParser { return query; } - // cache if required - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } - // if its a match_all query, use constant_score if (Queries.isConstantMatchAllQuery(query)) { Query q = new ConstantScoreQuery(filter); diff --git a/src/main/java/org/elasticsearch/index/query/FuzzyQueryParser.java b/src/main/java/org/elasticsearch/index/query/FuzzyQueryParser.java index 243f86534cd..229fcc95c72 100644 --- a/src/main/java/org/elasticsearch/index/query/FuzzyQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/FuzzyQueryParser.java @@ -57,7 +57,7 @@ public class FuzzyQueryParser implements QueryParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[fuzzy] query malformed, no field"); + throw new QueryParsingException(parseContext, "[fuzzy] query malformed, no field"); } String fieldName = parser.currentName(); @@ -95,7 +95,7 @@ public class FuzzyQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[fuzzy] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[fuzzy] query does not support [" + currentFieldName + "]"); } } } @@ -107,7 +107,7 @@ public class FuzzyQueryParser implements QueryParser { } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for fuzzy query"); + throw new QueryParsingException(parseContext, "No value specified for fuzzy query"); } Query query = null; diff --git a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java index c2133f10d77..40b41c7ffbd 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -43,9 +42,6 @@ public class GeoBoundingBoxFilterBuilder extends BaseFilterBuilder { private double[] box = {Double.NaN, Double.NaN, Double.NaN, Double.NaN}; - private Boolean cache; - private String cacheKey; - private String filterName; private String type; @@ -141,19 +137,6 @@ public class GeoBoundingBoxFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public GeoBoundingBoxFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public GeoBoundingBoxFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - /** * Sets the type of executing of the geo bounding box. Can be either `memory` or `indexed`. Defaults * to `memory`. 
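// Sketch of the match_all shortcut kept in FilteredQueryParser above: when the
// wrapped query matches every document with a constant score, only the filter
// matters, so the construct collapses to a ConstantScoreQuery over the filter.
// MatchAllDocsQuery stands in for Queries.isConstantMatchAllQuery here, which
// is a simplification of the real check.
import org.apache.lucene.search.ConstantScoreQuery;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;

final class FilteredShortcut {
    static Query apply(Query query, Filter filter, float boost) {
        if (query instanceof MatchAllDocsQuery) {
            Query constant = new ConstantScoreQuery(filter);
            constant.setBoost(boost);
            return constant;
        }
        return query; // the real code builds a FilteredQuery in this branch
    }
}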
@@ -167,13 +150,13 @@ public class GeoBoundingBoxFilterBuilder extends BaseFilterBuilder { protected void doXContent(XContentBuilder builder, Params params) throws IOException { // check values if(Double.isNaN(box[TOP])) { - throw new ElasticsearchIllegalArgumentException("geo_bounding_box requires top latitude to be set"); + throw new IllegalArgumentException("geo_bounding_box requires top latitude to be set"); } else if(Double.isNaN(box[BOTTOM])) { - throw new ElasticsearchIllegalArgumentException("geo_bounding_box requires bottom latitude to be set"); + throw new IllegalArgumentException("geo_bounding_box requires bottom latitude to be set"); } else if(Double.isNaN(box[RIGHT])) { - throw new ElasticsearchIllegalArgumentException("geo_bounding_box requires right longitude to be set"); + throw new IllegalArgumentException("geo_bounding_box requires right longitude to be set"); } else if(Double.isNaN(box[LEFT])) { - throw new ElasticsearchIllegalArgumentException("geo_bounding_box requires left longitude to be set"); + throw new IllegalArgumentException("geo_bounding_box requires left longitude to be set"); } builder.startObject(GeoBoundingBoxFilterParser.NAME); @@ -186,12 +169,6 @@ public class GeoBoundingBoxFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } if (type != null) { builder.field("type", type); } diff --git a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java index 8f68dbea074..6441b8d4b07 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxFilterParser.java @@ -20,12 +20,10 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; import org.elasticsearch.index.mapper.FieldMapper; @@ -72,8 +70,6 @@ public class GeoBoundingBoxFilterParser implements FilterParser { public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String fieldName = null; double top = Double.NaN; @@ -100,7 +96,9 @@ public class GeoBoundingBoxFilterParser implements FilterParser { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); token = parser.nextToken(); - if (FIELD.equals(currentFieldName)) { + if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip + } else if (FIELD.equals(currentFieldName)) { fieldName = parser.text(); } else if (TOP.equals(currentFieldName)) { top = parser.doubleValue(); @@ -138,16 +136,12 @@ public class GeoBoundingBoxFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = 
parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else if ("normalize".equals(currentFieldName)) { normalize = parser.booleanValue(); } else if ("type".equals(currentFieldName)) { type = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[geo_bbox] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[geo_bbox] filter does not support [" + currentFieldName + "]"); } } } @@ -169,11 +163,11 @@ public class GeoBoundingBoxFilterParser implements FilterParser { MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName); if (smartMappers == null || !smartMappers.hasMapper()) { - throw new QueryParsingException(parseContext.index(), "failed to find geo_point field [" + fieldName + "]"); + throw new QueryParsingException(parseContext, "failed to find geo_point field [" + fieldName + "]"); } FieldMapper mapper = smartMappers.mapper(); if (!(mapper instanceof GeoPointFieldMapper)) { - throw new QueryParsingException(parseContext.index(), "field [" + fieldName + "] is not a geo_point field"); + throw new QueryParsingException(parseContext, "field [" + fieldName + "] is not a geo_point field"); } GeoPointFieldMapper geoMapper = ((GeoPointFieldMapper) mapper); @@ -184,12 +178,10 @@ public class GeoBoundingBoxFilterParser implements FilterParser { IndexGeoPointFieldData indexFieldData = parseContext.getForField(mapper); filter = new InMemoryGeoBoundingBoxFilter(topLeft, bottomRight, indexFieldData); } else { - throw new QueryParsingException(parseContext.index(), "geo bounding box type [" + type + "] not supported, either 'indexed' or 'memory' are allowed"); + throw new QueryParsingException(parseContext, "geo bounding box type [" + type + + "] not supported, either 'indexed' or 'memory' are allowed"); } - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterBuilder.java index 6a07c285c1a..a45aee92c6c 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterBuilder.java @@ -45,9 +45,6 @@ public class GeoDistanceFilterBuilder extends BaseFilterBuilder { private String optimizeBbox; - private Boolean cache; - private String cacheKey; - private String filterName; public GeoDistanceFilterBuilder(String name) { @@ -103,19 +100,6 @@ public class GeoDistanceFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. 
- */ - public GeoDistanceFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public GeoDistanceFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(GeoDistanceFilterParser.NAME); @@ -134,12 +118,6 @@ public class GeoDistanceFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } } diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java index 252afdf25cf..ff46d591f3c 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceFilterParser.java @@ -20,13 +20,11 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.unit.DistanceUnit; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.IndexGeoPointFieldData; @@ -64,8 +62,6 @@ public class GeoDistanceFilterParser implements FilterParser { XContentParser.Token token; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String filterName = null; String currentFieldName = null; GeoPoint point = new GeoPoint(); @@ -80,6 +76,8 @@ public class GeoDistanceFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_ARRAY) { fieldName = currentFieldName; GeoUtils.parseGeoPoint(parser, point); @@ -98,7 +96,8 @@ public class GeoDistanceFilterParser implements FilterParser { } else if (currentName.equals(GeoPointFieldMapper.Names.GEOHASH)) { GeoHashUtils.decode(parser.text(), point); } else { - throw new QueryParsingException(parseContext.index(), "[geo_distance] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[geo_distance] filter does not support [" + currentFieldName + + "]"); } } } @@ -124,10 +123,6 @@ public class GeoDistanceFilterParser implements FilterParser { fieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.GEOHASH_SUFFIX.length()); } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else if ("optimize_bbox".equals(currentFieldName) || "optimizeBbox".equals(currentFieldName)) { optimizeBbox = parser.textOrNull(); } else if ("normalize".equals(currentFieldName)) { 
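// Sketch of the distance resolution around the next hunk: [geo_distance]
// accepts either a number, interpreted in the current unit, or a string such
// as "12km"; a missing value is an error. The Number branch is copied from the
// hunk below; the String parse overload is assumed from the surrounding code,
// and IllegalArgumentException stands in for QueryParsingException.
import org.elasticsearch.common.unit.DistanceUnit;

final class DistanceResolution {
    static double resolve(Object vDistance, DistanceUnit unit) {
        if (vDistance == null) {
            throw new IllegalArgumentException("geo_distance requires 'distance' to be specified");
        }
        if (vDistance instanceof Number) {
            return DistanceUnit.DEFAULT.convert(((Number) vDistance).doubleValue(), unit);
        }
        return DistanceUnit.parse((String) vDistance, unit, DistanceUnit.DEFAULT);
    }
}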
@@ -141,7 +136,7 @@ public class GeoDistanceFilterParser implements FilterParser { } if (vDistance == null) { - throw new QueryParsingException(parseContext.index(), "geo_distance requires 'distance' to be specified"); + throw new QueryParsingException(parseContext, "geo_distance requires 'distance' to be specified"); } else if (vDistance instanceof Number) { distance = DistanceUnit.DEFAULT.convert(((Number) vDistance).doubleValue(), unit); } else { @@ -155,20 +150,17 @@ public class GeoDistanceFilterParser implements FilterParser { MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName); if (smartMappers == null || !smartMappers.hasMapper()) { - throw new QueryParsingException(parseContext.index(), "failed to find geo_point field [" + fieldName + "]"); + throw new QueryParsingException(parseContext, "failed to find geo_point field [" + fieldName + "]"); } FieldMapper mapper = smartMappers.mapper(); if (!(mapper instanceof GeoPointFieldMapper)) { - throw new QueryParsingException(parseContext.index(), "field [" + fieldName + "] is not a geo_point field"); + throw new QueryParsingException(parseContext, "field [" + fieldName + "] is not a geo_point field"); } GeoPointFieldMapper geoMapper = ((GeoPointFieldMapper) mapper); IndexGeoPointFieldData indexFieldData = parseContext.getForField(mapper); Filter filter = new GeoDistanceFilter(point.lat(), point.lon(), distance, geoDistance, indexFieldData, geoMapper, optimizeBbox); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterBuilder.java index 343e50e3efb..c21cd3d62d7 100644 --- a/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterBuilder.java @@ -45,9 +45,6 @@ public class GeoDistanceRangeFilterBuilder extends BaseFilterBuilder { private GeoDistance geoDistance; - private Boolean cache; - private String cacheKey; - private String filterName; private String optimizeBbox; @@ -139,19 +136,6 @@ public class GeoDistanceRangeFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. 
-     */
-    public GeoDistanceRangeFilterBuilder cache(boolean cache) {
-        this.cache = cache;
-        return this;
-    }
-
-    public GeoDistanceRangeFilterBuilder cacheKey(String cacheKey) {
-        this.cacheKey = cacheKey;
-        return this;
-    }
-
     @Override
     protected void doXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(GeoDistanceRangeFilterParser.NAME);
@@ -173,12 +157,6 @@ public class GeoDistanceRangeFilterBuilder extends BaseFilterBuilder {
         if (filterName != null) {
             builder.field("_name", filterName);
         }
-        if (cache != null) {
-            builder.field("_cache", cache);
-        }
-        if (cacheKey != null) {
-            builder.field("_cache_key", cacheKey);
-        }
         builder.endObject();
     }
 }
diff --git a/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java
index b7452bec0f1..9322a230c01 100644
--- a/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java
+++ b/src/main/java/org/elasticsearch/index/query/GeoDistanceRangeFilterParser.java
@@ -20,13 +20,11 @@
 package org.elasticsearch.index.query;
 
 import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.QueryCachingPolicy;
 import org.elasticsearch.common.geo.GeoDistance;
 import org.elasticsearch.common.geo.GeoHashUtils;
 import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.geo.GeoUtils;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.lucene.HashedBytesRef;
 import org.elasticsearch.common.unit.DistanceUnit;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
@@ -64,8 +62,6 @@ public class GeoDistanceRangeFilterParser implements FilterParser {
 
         XContentParser.Token token;
 
-        QueryCachingPolicy cache = parseContext.autoFilterCachePolicy();
-        HashedBytesRef cacheKey = null;
         String filterName = null;
         String currentFieldName = null;
         GeoPoint point = new GeoPoint();
@@ -82,6 +78,8 @@ public class GeoDistanceRangeFilterParser implements FilterParser {
         while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
             if (token == XContentParser.Token.FIELD_NAME) {
                 currentFieldName = parser.currentName();
+            } else if (parseContext.isDeprecatedSetting(currentFieldName)) {
+                // skip
             } else if (token == XContentParser.Token.START_ARRAY) {
                 GeoUtils.parseGeoPoint(parser, point);
                 fieldName = currentFieldName;
@@ -155,10 +153,6 @@ public class GeoDistanceRangeFilterParser implements FilterParser {
                     fieldName = currentFieldName.substring(0, currentFieldName.length() - GeoPointFieldMapper.Names.GEOHASH_SUFFIX.length());
                 } else if ("_name".equals(currentFieldName)) {
                     filterName = parser.text();
-                } else if ("_cache".equals(currentFieldName)) {
-                    cache = parseContext.parseFilterCachePolicy();
-                } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
-                    cacheKey = new HashedBytesRef(parser.text());
                 } else if ("optimize_bbox".equals(currentFieldName) || "optimizeBbox".equals(currentFieldName)) {
                     optimizeBbox = parser.textOrNull();
                 } else if ("normalize".equals(currentFieldName)) {
@@ -196,19 +190,16 @@ public class GeoDistanceRangeFilterParser implements FilterParser {
 
         MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName);
         if (smartMappers == null || !smartMappers.hasMapper()) {
-            throw new QueryParsingException(parseContext.index(), "failed to find geo_point field [" + fieldName + "]");
+            throw new QueryParsingException(parseContext, "failed to find geo_point field [" + fieldName + "]");
         }
         FieldMapper mapper = smartMappers.mapper();
         if (!(mapper instanceof GeoPointFieldMapper)) {
-            throw new QueryParsingException(parseContext.index(), "field [" + fieldName + "] is not a geo_point field");
+            throw new QueryParsingException(parseContext, "field [" + fieldName + "] is not a geo_point field");
         }
         GeoPointFieldMapper geoMapper = ((GeoPointFieldMapper) mapper);
 
         IndexGeoPointFieldData indexFieldData = parseContext.getForField(mapper);
         Filter filter = new GeoDistanceRangeFilter(point, from, to, includeLower, includeUpper, geoDistance, geoMapper, indexFieldData, optimizeBbox);
-        if (cache != null) {
-            filter = parseContext.cacheFilter(filter, cacheKey, cache);
-        }
         if (filterName != null) {
             parseContext.addNamedFilter(filterName, filter);
         }
diff --git a/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterBuilder.java
index e32a1e58e1b..fd0a2f569c4 100644
--- a/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterBuilder.java
+++ b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterBuilder.java
@@ -20,6 +20,7 @@
 package org.elasticsearch.index.query;
 
 import com.google.common.collect.Lists;
+
 import org.elasticsearch.common.geo.GeoHashUtils;
 import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -38,9 +39,6 @@ public class GeoPolygonFilterBuilder extends BaseFilterBuilder {
 
     private final List<GeoPoint> shell = Lists.newArrayList();
 
-    private Boolean cache;
-    private String cacheKey;
-
     private String filterName;
 
     public GeoPolygonFilterBuilder(String name) {
@@ -75,19 +73,6 @@ public class GeoPolygonFilterBuilder extends BaseFilterBuilder {
         return this;
     }
 
-    /**
-     * Should the filter be cached or not. Defaults to false.
-     */
-    public GeoPolygonFilterBuilder cache(boolean cache) {
-        this.cache = cache;
-        return this;
-    }
-
-    public GeoPolygonFilterBuilder cacheKey(String cacheKey) {
-        this.cacheKey = cacheKey;
-        return this;
-    }
-
     @Override
     protected void doXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject(GeoPolygonFilterParser.NAME);
@@ -103,12 +88,6 @@ public class GeoPolygonFilterBuilder extends BaseFilterBuilder {
         if (filterName != null) {
             builder.field("_name", filterName);
         }
-        if (cache != null) {
-            builder.field("_cache", cache);
-        }
-        if (cacheKey != null) {
-            builder.field("_cache_key", cacheKey);
-        }
         builder.endObject();
     }
diff --git a/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java
index fefa37c07e3..f3f41ac7126 100644
--- a/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java
+++ b/src/main/java/org/elasticsearch/index/query/GeoPolygonFilterParser.java
@@ -22,11 +22,9 @@ package org.elasticsearch.index.query;
 
 import com.google.common.collect.Lists;
 import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.QueryCachingPolicy;
 import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.geo.GeoUtils;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.lucene.HashedBytesRef;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
@@ -68,8 +66,6 @@ public class GeoPolygonFilterParser implements FilterParser {
     public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
         XContentParser parser = parseContext.parser();
 
-        QueryCachingPolicy cache = parseContext.autoFilterCachePolicy();
-        HashedBytesRef cacheKey = null;
         String fieldName = null;
         List<GeoPoint> shell = Lists.newArrayList();
@@ -84,6 +80,8 @@ public class GeoPolygonFilterParser implements FilterParser {
         while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
             if (token == XContentParser.Token.FIELD_NAME) {
                 currentFieldName = parser.currentName();
+            } else if (parseContext.isDeprecatedSetting(currentFieldName)) {
+                // skip
             } else if (token == XContentParser.Token.START_OBJECT) {
                 fieldName = currentFieldName;
@@ -96,42 +94,40 @@ public class GeoPolygonFilterParser implements FilterParser {
                             shell.add(GeoUtils.parseGeoPoint(parser));
                         }
                     } else {
-                        throw new QueryParsingException(parseContext.index(), "[geo_polygon] filter does not support [" + currentFieldName + "]");
+                        throw new QueryParsingException(parseContext, "[geo_polygon] filter does not support [" + currentFieldName
+                                + "]");
                     }
                 } else {
-                    throw new QueryParsingException(parseContext.index(), "[geo_polygon] filter does not support token type [" + token.name() + "] under [" + currentFieldName + "]");
+                    throw new QueryParsingException(parseContext, "[geo_polygon] filter does not support token type [" + token.name()
+                            + "] under [" + currentFieldName + "]");
                 }
             } else if (token.isValue()) {
                 if ("_name".equals(currentFieldName)) {
                     filterName = parser.text();
-                } else if ("_cache".equals(currentFieldName)) {
-                    cache = parseContext.parseFilterCachePolicy();
-                } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
-                    cacheKey = new HashedBytesRef(parser.text());
                 } else if ("normalize".equals(currentFieldName)) {
                     normalizeLat = parser.booleanValue();
                     normalizeLon = parser.booleanValue();
                 } else {
-                    throw new QueryParsingException(parseContext.index(), "[geo_polygon] filter does not support [" + currentFieldName + "]");
+                    throw new QueryParsingException(parseContext, "[geo_polygon] filter does not support [" + currentFieldName + "]");
                 }
             } else {
-                throw new QueryParsingException(parseContext.index(), "[geo_polygon] unexpected token type [" + token.name() + "]");
+                throw new QueryParsingException(parseContext, "[geo_polygon] unexpected token type [" + token.name() + "]");
             }
         }
 
         if (shell.isEmpty()) {
-            throw new QueryParsingException(parseContext.index(), "no points defined for geo_polygon filter");
+            throw new QueryParsingException(parseContext, "no points defined for geo_polygon filter");
         } else {
             if (shell.size() < 3) {
-                throw new QueryParsingException(parseContext.index(), "too few points defined for geo_polygon filter");
+                throw new QueryParsingException(parseContext, "too few points defined for geo_polygon filter");
             }
             GeoPoint start = shell.get(0);
             if (!start.equals(shell.get(shell.size() - 1))) {
                 shell.add(start);
             }
             if (shell.size() < 4) {
-                throw new QueryParsingException(parseContext.index(), "too few points defined for geo_polygon filter");
+                throw new QueryParsingException(parseContext, "too few points defined for geo_polygon filter");
             }
         }
@@ -143,18 +139,15 @@ public class GeoPolygonFilterParser implements FilterParser {
 
         MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName);
         if (smartMappers == null || !smartMappers.hasMapper()) {
-            throw new QueryParsingException(parseContext.index(), "failed to find geo_point field [" + fieldName + "]");
+            throw new QueryParsingException(parseContext, "failed to find geo_point field [" + fieldName + "]");
         }
         FieldMapper mapper = smartMappers.mapper();
         if (!(mapper instanceof GeoPointFieldMapper)) {
-            throw new QueryParsingException(parseContext.index(), "field [" + fieldName + "] is not a geo_point field");
+            throw new QueryParsingException(parseContext, "field [" + fieldName + "] is not a geo_point field");
         }
 
         IndexGeoPointFieldData indexFieldData = parseContext.getForField(mapper);
         Filter filter = new GeoPolygonFilter(indexFieldData, shell.toArray(new GeoPoint[shell.size()]));
-        if (cache != null) {
-            filter = parseContext.cacheFilter(filter, cacheKey, cache);
-        }
         if (filterName != null) {
             parseContext.addNamedFilter(filterName, filter);
         }
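The validation above both checks and repairs the polygon ring: an open shell is closed by re-appending its first point, and the size checks run before and after closing. A standalone sketch of that rule under the same semantics (the closeShell helper name is hypothetical):

    import java.util.List;
    import org.elasticsearch.common.geo.GeoPoint;

    // Close an open polygon ring the way the geo_polygon parser does.
    static List<GeoPoint> closeShell(List<GeoPoint> shell) {
        if (shell.size() < 3) {
            throw new IllegalArgumentException("too few points defined for geo_polygon filter");
        }
        GeoPoint start = shell.get(0);
        if (!start.equals(shell.get(shell.size() - 1))) {
            shell.add(start); // close an open ring by repeating the first point
        }
        if (shell.size() < 4) {
            // a closed triangle needs 4 entries: 3 corners plus the repeated start
            throw new IllegalArgumentException("too few points defined for geo_polygon filter");
        }
        return shell;
    }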
diff --git a/src/main/java/org/elasticsearch/index/query/GeoShapeFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterBuilder.java
index 4ff26d7aacd..1ac7b14481c 100644
--- a/src/main/java/org/elasticsearch/index/query/GeoShapeFilterBuilder.java
+++ b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterBuilder.java
@@ -37,9 +37,6 @@ public class GeoShapeFilterBuilder extends BaseFilterBuilder {
 
     private SpatialStrategy strategy = null;
 
-    private Boolean cache;
-    private String cacheKey;
-
     private String filterName;
 
     private final String indexedShapeId;
@@ -93,28 +90,6 @@ public class GeoShapeFilterBuilder extends BaseFilterBuilder {
         this.indexedShapeType = indexedShapeType;
     }
 
-    /**
-     * Sets whether the filter will be cached.
-     *
-     * @param cache Whether filter will be cached
-     * @return this
-     */
-    public GeoShapeFilterBuilder cache(boolean cache) {
-        this.cache = cache;
-        return this;
-    }
-
-    /**
-     * Sets the key used for the filter if it is cached
-     *
-     * @param cacheKey Key for the Filter if cached
-     * @return this
-     */
-    public GeoShapeFilterBuilder cacheKey(String cacheKey) {
-        this.cacheKey = cacheKey;
-        return this;
-    }
-
     /**
      * Sets the name of the filter
      *
@@ -205,12 +180,6 @@ public class GeoShapeFilterBuilder extends BaseFilterBuilder {
         if (name != null) {
             builder.field("_name", filterName);
         }
-        if (cache != null) {
-            builder.field("_cache", cache);
-        }
-        if (cacheKey != null) {
-            builder.field("_cache_key", cacheKey);
-        }
 
         builder.endObject();
     }
diff --git a/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java
index 72eba62854e..bba22882d27 100644
--- a/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java
+++ b/src/main/java/org/elasticsearch/index/query/GeoShapeFilterParser.java
@@ -24,15 +24,13 @@ import com.spatial4j.core.shape.Shape;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.QueryCachingPolicy;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.spatial.prefix.PrefixTreeStrategy;
 import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
 import org.elasticsearch.common.geo.ShapeRelation;
 import org.elasticsearch.common.geo.builders.ShapeBuilder;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.inject.internal.Nullable;
-import org.elasticsearch.common.lucene.HashedBytesRef;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MapperService;
@@ -85,8 +83,6 @@ public class GeoShapeFilterParser implements FilterParser {
         ShapeRelation shapeRelation = ShapeRelation.INTERSECTS;
         String strategyName = null;
         ShapeBuilder shape = null;
-        QueryCachingPolicy cache = parseContext.autoFilterCachePolicy();
-        HashedBytesRef cacheKey = null;
         String filterName = null;
 
         String id = null;
@@ -100,6 +96,8 @@ public class GeoShapeFilterParser implements FilterParser {
         while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
             if (token == XContentParser.Token.FIELD_NAME) {
                 currentFieldName = parser.currentName();
+            } else if (parseContext.isDeprecatedSetting(currentFieldName)) {
+                // skip
             } else if (token == XContentParser.Token.START_OBJECT) {
                 fieldName = currentFieldName;
@@ -113,7 +111,7 @@ public class GeoShapeFilterParser implements FilterParser {
                     } else if ("relation".equals(currentFieldName)) {
                         shapeRelation = ShapeRelation.getRelationByName(parser.text());
                         if (shapeRelation == null) {
-                            throw new QueryParsingException(parseContext.index(), "Unknown shape operation [" + parser.text() + "]");
+                            throw new QueryParsingException(parseContext, "Unknown shape operation [" + parser.text() + "]");
                         }
                     } else if ("strategy".equals(currentFieldName)) {
                         strategyName = parser.text();
@@ -134,44 +132,40 @@ public class GeoShapeFilterParser implements FilterParser {
                             }
                         }
                         if (id == null) {
-                            throw new QueryParsingException(parseContext.index(), "ID for indexed shape not provided");
+                            throw new QueryParsingException(parseContext, "ID for indexed shape not provided");
                        } else if (type == null) {
-                            throw new QueryParsingException(parseContext.index(), "Type for indexed shape not provided");
+                            throw new QueryParsingException(parseContext, "Type for indexed shape not provided");
                        }
                        shape = fetchService.fetch(id, type, index, shapePath);
                    } else {
-                        throw new QueryParsingException(parseContext.index(), "[geo_shape] filter does not support [" + currentFieldName + "]");
+                        throw new QueryParsingException(parseContext, "[geo_shape] filter does not support [" + currentFieldName + "]");
                    }
                }
            }
        } else if (token.isValue()) {
            if ("_name".equals(currentFieldName)) {
                filterName = parser.text();
-            } else if ("_cache".equals(currentFieldName)) {
-                cache = parseContext.parseFilterCachePolicy();
-            } else if ("_cache_key".equals(currentFieldName)) {
-                cacheKey = new HashedBytesRef(parser.text());
            } else {
-                throw new QueryParsingException(parseContext.index(), "[geo_shape] filter does not support [" + currentFieldName + "]");
+                throw new QueryParsingException(parseContext, "[geo_shape] filter does not support [" + currentFieldName + "]");
            }
        }
    }
 
    if (shape == null) {
-        throw new QueryParsingException(parseContext.index(), "No Shape defined");
+        throw new QueryParsingException(parseContext, "No Shape defined");
    } else if (shapeRelation == null) {
-        throw new QueryParsingException(parseContext.index(), "No Shape Relation defined");
+        throw new QueryParsingException(parseContext, "No Shape Relation defined");
    }
 
    MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
    if (smartNameFieldMappers == null || !smartNameFieldMappers.hasMapper()) {
-        throw new QueryParsingException(parseContext.index(), "Failed to find geo_shape field [" + fieldName + "]");
+        throw new QueryParsingException(parseContext, "Failed to find geo_shape field [" + fieldName + "]");
    }
 
    FieldMapper fieldMapper = smartNameFieldMappers.mapper();
    // TODO: This isn't the nicest way to check this
    if (!(fieldMapper instanceof GeoShapeFieldMapper)) {
-        throw new QueryParsingException(parseContext.index(), "Field [" + fieldName + "] is not a geo_shape");
+        throw new QueryParsingException(parseContext, "Field [" + fieldName + "] is not a geo_shape");
    }
 
    GeoShapeFieldMapper shapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
@@ -189,15 +183,11 @@ public class GeoShapeFilterParser implements FilterParser {
            Filter intersects = strategy.makeFilter(GeoShapeQueryParser.getArgs(shape, ShapeRelation.INTERSECTS));
            bool.add(exists, BooleanClause.Occur.MUST);
            bool.add(intersects, BooleanClause.Occur.MUST_NOT);
-            filter = Queries.wrap(bool);
+            filter = new QueryWrapperFilter(bool);
        } else {
            filter = strategy.makeFilter(GeoShapeQueryParser.getArgs(shape, shapeRelation));
        }
 
-        if (cache != null) {
-            filter = parseContext.cacheFilter(filter, cacheKey, cache);
-        }
-
        if (filterName != null) {
            parseContext.addNamedFilter(filterName, filter);
        }
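The DISJOINT branch above is the one place the geo_shape filter composes two spatial filters: a document is disjoint from the query shape exactly when the field has a value and does not intersect that shape. A sketch of the decomposition, assuming `exists` (built earlier in the parser, outside this hunk) matches every document with a value for the field; QueryWrapperFilter is Lucene's stock Query-to-Filter adapter and replaces the removed Queries.wrap(...) helper:

    // disjoint == "field has a shape" AND NOT "shape intersects the query shape"
    BooleanQuery bool = new BooleanQuery();
    Filter intersects = strategy.makeFilter(GeoShapeQueryParser.getArgs(shape, ShapeRelation.INTERSECTS));
    bool.add(exists, BooleanClause.Occur.MUST);
    bool.add(intersects, BooleanClause.Occur.MUST_NOT);
    Filter filter = new QueryWrapperFilter(bool);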
diff --git a/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java b/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java
index ac732bc99aa..ac3d4f59f92 100644
--- a/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java
+++ b/src/main/java/org/elasticsearch/index/query/GeoShapeQueryParser.java
@@ -28,7 +28,6 @@ import org.apache.lucene.spatial.prefix.PrefixTreeStrategy;
 import org.apache.lucene.spatial.prefix.RecursivePrefixTreeStrategy;
 import org.apache.lucene.spatial.query.SpatialArgs;
 import org.apache.lucene.spatial.query.SpatialOperation;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.geo.ShapeRelation;
@@ -94,7 +93,7 @@ public class GeoShapeQueryParser implements QueryParser {
             } else if ("relation".equals(currentFieldName)) {
                 shapeRelation = ShapeRelation.getRelationByName(parser.text());
                 if (shapeRelation == null) {
-                    throw new QueryParsingException(parseContext.index(), "Unknown shape operation [" + parser.text() + " ]");
+                    throw new QueryParsingException(parseContext, "Unknown shape operation [" + parser.text() + "]");
                 }
             } else if ("indexed_shape".equals(currentFieldName) || "indexedShape".equals(currentFieldName)) {
                 while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
@@ -113,13 +112,13 @@ public class GeoShapeQueryParser implements QueryParser {
                     }
                 }
                 if (id == null) {
-                    throw new QueryParsingException(parseContext.index(), "ID for indexed shape not provided");
+                    throw new QueryParsingException(parseContext, "ID for indexed shape not provided");
                 } else if (type == null) {
-                    throw new QueryParsingException(parseContext.index(), "Type for indexed shape not provided");
+                    throw new QueryParsingException(parseContext, "Type for indexed shape not provided");
                 }
                 shape = fetchService.fetch(id, type, index, shapePath);
             } else {
-                throw new QueryParsingException(parseContext.index(), "[geo_shape] query does not support [" + currentFieldName + "]");
+                throw new QueryParsingException(parseContext, "[geo_shape] query does not support [" + currentFieldName + "]");
             }
         }
     }
@@ -129,26 +128,26 @@ public class GeoShapeQueryParser implements QueryParser {
             } else if ("_name".equals(currentFieldName)) {
                 queryName = parser.text();
             } else {
-                throw new QueryParsingException(parseContext.index(), "[geo_shape] query does not support [" + currentFieldName + "]");
+                throw new QueryParsingException(parseContext, "[geo_shape] query does not support [" + currentFieldName + "]");
             }
         }
     }
 
     if (shape == null) {
-        throw new QueryParsingException(parseContext.index(), "No Shape defined");
+        throw new QueryParsingException(parseContext, "No Shape defined");
     } else if (shapeRelation == null) {
-        throw new QueryParsingException(parseContext.index(), "No Shape Relation defined");
+        throw new QueryParsingException(parseContext, "No Shape Relation defined");
     }
 
     MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(fieldName);
     if (smartNameFieldMappers == null || !smartNameFieldMappers.hasMapper()) {
-        throw new QueryParsingException(parseContext.index(), "Failed to find geo_shape field [" + fieldName + "]");
+        throw new QueryParsingException(parseContext, "Failed to find geo_shape field [" + fieldName + "]");
     }
 
     FieldMapper fieldMapper = smartNameFieldMappers.mapper();
     // TODO: This isn't the nicest way to check this
     if (!(fieldMapper instanceof GeoShapeFieldMapper)) {
-        throw new QueryParsingException(parseContext.index(), "Field [" + fieldName + "] is not a geo_shape");
+        throw new QueryParsingException(parseContext, "Field [" + fieldName + "] is not a geo_shape");
     }
 
     GeoShapeFieldMapper shapeFieldMapper = (GeoShapeFieldMapper) fieldMapper;
@@ -191,7 +190,7 @@ public class GeoShapeQueryParser implements QueryParser {
             case WITHIN:
                 return new SpatialArgs(SpatialOperation.IsWithin, shape.build());
             default:
                 throw new IllegalArgumentException("");
        }
    }
diff --git a/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java b/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java
index 9e69bc25a89..e390bf53ea7 100644
--- a/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java
+++ b/src/main/java/org/elasticsearch/index/query/GeohashCellFilter.java
@@ -20,8 +20,6 @@
 package org.elasticsearch.index.query;
 
 import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.QueryCachingPolicy;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
@@ -29,7 +27,6 @@ import org.elasticsearch.common.geo.GeoHashUtils;
 import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.geo.GeoUtils;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.lucene.HashedBytesRef;
 import org.elasticsearch.common.unit.DistanceUnit;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -63,8 +60,6 @@ public class GeohashCellFilter {
     public static final String NAME = "geohash_cell";
     public static final String NEIGHBORS = "neighbors";
     public static final String PRECISION = "precision";
-    public static final String CACHE = "_cache";
-    public static final String CACHE_KEY = "_cache_key";
 
     /**
     * Create a new geohash filter for a given set of geohashes. In general this method
     */
    public static Filter create(QueryParseContext context, GeoPointFieldMapper fieldMapper, String geohash, @Nullable List geohashes) {
        if (fieldMapper.geoHashStringMapper() == null) {
-            throw new ElasticsearchIllegalArgumentException("geohash filter needs geohash_prefix to be enabled");
+            throw new IllegalArgumentException("geohash filter needs geohash_prefix to be enabled");
        }
 
        StringFieldMapper geoHashMapper = fieldMapper.geoHashStringMapper();
@@ -104,8 +99,6 @@ public class GeohashCellFilter {
        private String geohash;
        private int levels = -1;
        private boolean neighbors;
-        private Boolean cache;
-        private String cacheKey;
 
        public Builder(String field) {
@@ -162,19 +155,6 @@ public class GeohashCellFilter {
            return this;
        }
 
-        /**
-         * Should the filter be cached or not. Defaults to false.
-         */
-        public Builder cache(boolean cache) {
-            this.cache = cache;
-            return this;
-        }
-
-        public Builder cacheKey(String cacheKey) {
-            this.cacheKey = cacheKey;
-            return this;
-        }
-
        @Override
        protected void doXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject(NAME);
@@ -184,12 +164,6 @@ public class GeohashCellFilter {
            if(levels > 0) {
                builder.field(PRECISION, levels);
            }
-            if (cache != null) {
-                builder.field(CACHE, cache);
-            }
-            if (cacheKey != null) {
-                builder.field(CACHE_KEY, cacheKey);
-            }
            builder.field(field, geohash);
 
            builder.endObject();
@@ -215,8 +189,6 @@ public class GeohashCellFilter {
            String geohash = null;
            int levels = -1;
            boolean neighbors = false;
-            QueryCachingPolicy cache = parseContext.autoFilterCachePolicy();
-            HashedBytesRef cacheKey = null;
 
            XContentParser.Token token;
@@ -228,7 +200,9 @@ public class GeohashCellFilter {
                if (token == Token.FIELD_NAME) {
                    String field = parser.text();
 
-                    if (PRECISION.equals(field)) {
+                    if (parseContext.isDeprecatedSetting(field)) {
+                        // skip
+                    } else if (PRECISION.equals(field)) {
                        token = parser.nextToken();
                        if(token == Token.VALUE_NUMBER) {
                            levels = parser.intValue();
@@ -239,12 +213,6 @@ public class GeohashCellFilter {
                    } else if (NEIGHBORS.equals(field)) {
                        parser.nextToken();
                        neighbors = parser.booleanValue();
-                    } else if (CACHE.equals(field)) {
-                        parser.nextToken();
-                        cache = parseContext.parseFilterCachePolicy();
-                    } else if (CACHE_KEY.equals(field)) {
-                        parser.nextToken();
-                        cacheKey = new HashedBytesRef(parser.text());
                    } else {
                        fieldName = field;
                        token = parser.nextToken();
@@ -266,22 +234,23 @@ public class GeohashCellFilter {
            }
 
            if (geohash == null) {
-                throw new QueryParsingException(parseContext.index(), "no geohash value provided to geohash_cell filter");
+                throw new QueryParsingException(parseContext, "no geohash value provided to geohash_cell filter");
            }
 
            MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName);
            if (smartMappers == null || !smartMappers.hasMapper()) {
-                throw new QueryParsingException(parseContext.index(), "failed to find geo_point field [" + fieldName + "]");
+                throw new QueryParsingException(parseContext, "failed to find geo_point field [" + fieldName + "]");
            }
 
            FieldMapper mapper = smartMappers.mapper();
            if (!(mapper instanceof GeoPointFieldMapper)) {
-                throw new QueryParsingException(parseContext.index(), "field [" + fieldName + "] is not a geo_point field");
+                throw new QueryParsingException(parseContext, "field [" + fieldName + "] is not a geo_point field");
            }
 
            GeoPointFieldMapper geoMapper = ((GeoPointFieldMapper) mapper);
            if (!geoMapper.isEnableGeohashPrefix()) {
-                throw new QueryParsingException(parseContext.index(), "can't execute geohash_cell on field [" + fieldName + "], geohash_prefix is not enabled");
+                throw new QueryParsingException(parseContext, "can't execute geohash_cell on field [" + fieldName
+                        + "], geohash_prefix is not enabled");
            }
 
            if(levels > 0) {
@@ -296,10 +265,6 @@ public class GeohashCellFilter {
                filter = create(parseContext, geoMapper, geohash, null);
            }
 
-            if (cache != null) {
-                filter = parseContext.cacheFilter(filter, cacheKey, cache);
-            }
-
            return filter;
        }
    }
diff --git a/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java b/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java
index 0f83e3408bb..8bf761b84a0 100644
--- a/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java
+++ b/src/main/java/org/elasticsearch/index/query/HasChildFilterParser.java
@@ -21,10 +21,12 @@
 package org.elasticsearch.index.query;
 
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.FilteredQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.search.join.BitDocIdSetFilter;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
 import org.elasticsearch.index.mapper.DocumentMapper;
@@ -33,16 +35,12 @@ import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper;
 import org.elasticsearch.index.query.support.XContentStructure;
 import org.elasticsearch.index.search.child.ChildrenConstantScoreQuery;
 import org.elasticsearch.index.search.child.ChildrenQuery;
-import org.elasticsearch.index.search.child.CustomQueryWrappingFilter;
 import org.elasticsearch.index.search.child.ScoreType;
-import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
 import org.elasticsearch.search.fetch.innerhits.InnerHitsContext;
 import org.elasticsearch.search.internal.SubSearchContext;
 
 import java.io.IOException;
 
-import static org.elasticsearch.index.query.QueryParserUtils.ensureNotDeleteByQuery;
-
 /**
  *
  */
@@ -64,7 +62,6 @@ public class HasChildFilterParser implements FilterParser {
 
     @Override
     public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
-        ensureNotDeleteByQuery(NAME, parseContext);
         XContentParser parser = parseContext.parser();
 
         boolean queryFound = false;
@@ -83,6 +80,8 @@ public class HasChildFilterParser implements FilterParser {
         while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
             if (token == XContentParser.Token.FIELD_NAME) {
                 currentFieldName = parser.currentName();
+            } else if (parseContext.isDeprecatedSetting(currentFieldName)) {
+                // skip
             } else if (token == XContentParser.Token.START_OBJECT) {
                 // Usually, the query would be parsed here, but the child
                 // type may not have been extracted yet, so use the
@@ -97,17 +96,13 @@ public class HasChildFilterParser implements FilterParser {
                 } else if ("inner_hits".equals(currentFieldName)) {
                     innerHits = innerHitsQueryParserHelper.parse(parseContext);
                 } else {
-                    throw new QueryParsingException(parseContext.index(), "[has_child] filter does not support [" + currentFieldName + "]");
+                    throw new QueryParsingException(parseContext, "[has_child] filter does not support [" + currentFieldName + "]");
                 }
             } else if (token.isValue()) {
                 if ("type".equals(currentFieldName) || "child_type".equals(currentFieldName) || "childType".equals(currentFieldName)) {
                     childType = parser.text();
                 } else if ("_name".equals(currentFieldName)) {
                     filterName = parser.text();
-                } else if ("_cache".equals(currentFieldName)) {
-                    // noop to be backwards compatible
-                } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
-                    // noop to be backwards compatible
                 } else if ("short_circuit_cutoff".equals(currentFieldName)) {
                     shortCircuitParentDocSet = parser.intValue();
                 } else if ("min_children".equals(currentFieldName) || "minChildren".equals(currentFieldName)) {
@@ -115,15 +110,15 @@ public class HasChildFilterParser implements FilterParser {
                 } else if ("max_children".equals(currentFieldName) || "maxChildren".equals(currentFieldName)) {
                     maxChildren = parser.intValue(true);
                 } else {
QueryParsingException(parseContext.index(), "[has_child] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[has_child] filter does not support [" + currentFieldName + "]"); } } } if (!queryFound && !filterFound) { - throw new QueryParsingException(parseContext.index(), "[has_child] filter requires 'query' or 'filter' field"); + throw new QueryParsingException(parseContext, "[has_child] filter requires 'query' or 'filter' field"); } if (childType == null) { - throw new QueryParsingException(parseContext.index(), "[has_child] filter requires 'type' field"); + throw new QueryParsingException(parseContext, "[has_child] filter requires 'type' field"); } Query query; @@ -139,7 +134,7 @@ public class HasChildFilterParser implements FilterParser { DocumentMapper childDocMapper = parseContext.mapperService().documentMapper(childType); if (childDocMapper == null) { - throw new QueryParsingException(parseContext.index(), "No mapping for for type [" + childType + "]"); + throw new QueryParsingException(parseContext, "No mapping for for type [" + childType + "]"); } if (innerHits != null) { InnerHitsContext.ParentChildInnerHits parentChildInnerHits = new InnerHitsContext.ParentChildInnerHits(innerHits.v2(), query, null, childDocMapper); @@ -148,28 +143,29 @@ public class HasChildFilterParser implements FilterParser { } ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper(); if (!parentFieldMapper.active()) { - throw new QueryParsingException(parseContext.index(), "Type [" + childType + "] does not have parent mapping"); + throw new QueryParsingException(parseContext, "Type [" + childType + "] does not have parent mapping"); } String parentType = parentFieldMapper.type(); // wrap the query with type query - query = new FilteredQuery(query, parseContext.cacheFilter(childDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy())); + query = new FilteredQuery(query, childDocMapper.typeFilter()); DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType); if (parentDocMapper == null) { - throw new QueryParsingException(parseContext.index(), "[has_child] Type [" + childType + "] points to a non existent parent type [" + parentType + "]"); + throw new QueryParsingException(parseContext, "[has_child] Type [" + childType + "] points to a non existent parent type [" + + parentType + "]"); } if (maxChildren > 0 && maxChildren < minChildren) { - throw new QueryParsingException(parseContext.index(), "[has_child] 'max_children' is less than 'min_children'"); + throw new QueryParsingException(parseContext, "[has_child] 'max_children' is less than 'min_children'"); } BitDocIdSetFilter nonNestedDocsFilter = null; if (parentDocMapper.hasNestedObjects()) { - nonNestedDocsFilter = parseContext.bitsetFilter(NonNestedDocsFilter.INSTANCE); + nonNestedDocsFilter = parseContext.bitsetFilter(Queries.newNonNestedFilter()); } - Filter parentFilter = parseContext.cacheFilter(parentDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy()); + Filter parentFilter = parentDocMapper.typeFilter(); ParentChildIndexFieldData parentChildIndexFieldData = parseContext.getForField(parentFieldMapper); Query childrenQuery; @@ -180,9 +176,9 @@ public class HasChildFilterParser implements FilterParser { shortCircuitParentDocSet, nonNestedDocsFilter); } if (filterName != null) { - parseContext.addNamedFilter(filterName, new CustomQueryWrappingFilter(childrenQuery)); + parseContext.addNamedFilter(filterName, new 
+            parseContext.addNamedFilter(filterName, new QueryWrapperFilter(childrenQuery));
        }
-        return new CustomQueryWrappingFilter(childrenQuery);
+        return new QueryWrapperFilter(childrenQuery);
    }
 }
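With the explicit filter cache gone, the has_child filter composes plain Lucene constructs: the child query is intersected with the type filter directly, and nested documents are excluded through the shared Queries helper rather than the removed NonNestedDocsFilter singleton. A condensed sketch of those two steps as they appear above:

    // un-cached type restriction: any caching now happens inside Lucene itself
    Query typed = new FilteredQuery(innerQuery, childDocMapper.typeFilter());
    // exclude nested documents via the shared helper instead of NonNestedDocsFilter.INSTANCE
    BitDocIdSetFilter nonNested = parentDocMapper.hasNestedObjects()
            ? parseContext.bitsetFilter(Queries.newNonNestedFilter())
            : null;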
diff --git a/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java b/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java
index c7e8f2567d4..2e55395535c 100644
--- a/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java
+++ b/src/main/java/org/elasticsearch/index/query/HasChildQueryParser.java
@@ -26,6 +26,7 @@ import org.apache.lucene.search.join.BitDocIdSetFilter;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData;
 import org.elasticsearch.index.mapper.DocumentMapper;
@@ -34,16 +35,12 @@ import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper;
 import org.elasticsearch.index.query.support.XContentStructure;
 import org.elasticsearch.index.search.child.ChildrenConstantScoreQuery;
 import org.elasticsearch.index.search.child.ChildrenQuery;
-import org.elasticsearch.index.search.child.CustomQueryWrappingFilter;
 import org.elasticsearch.index.search.child.ScoreType;
-import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
 import org.elasticsearch.search.fetch.innerhits.InnerHitsContext;
 import org.elasticsearch.search.internal.SubSearchContext;
 
 import java.io.IOException;
 
-import static org.elasticsearch.index.query.QueryParserUtils.ensureNotDeleteByQuery;
-
 /**
  *
  */
@@ -65,7 +62,6 @@ public class HasChildQueryParser implements QueryParser {
 
     @Override
     public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
-        ensureNotDeleteByQuery(NAME, parseContext);
         XContentParser parser = parseContext.parser();
 
         boolean queryFound = false;
@@ -95,7 +91,7 @@ public class HasChildQueryParser implements QueryParser {
                 } else if ("inner_hits".equals(currentFieldName)) {
                     innerHits = innerHitsQueryParserHelper.parse(parseContext);
                 } else {
-                    throw new QueryParsingException(parseContext.index(), "[has_child] query does not support [" + currentFieldName + "]");
+                    throw new QueryParsingException(parseContext, "[has_child] query does not support [" + currentFieldName + "]");
                 }
             } else if (token.isValue()) {
                 if ("type".equals(currentFieldName) || "child_type".equals(currentFieldName) || "childType".equals(currentFieldName)) {
@@ -115,15 +111,15 @@ public class HasChildQueryParser implements QueryParser {
                 } else if ("_name".equals(currentFieldName)) {
                     queryName = parser.text();
                 } else {
-                    throw new QueryParsingException(parseContext.index(), "[has_child] query does not support [" + currentFieldName + "]");
+                    throw new QueryParsingException(parseContext, "[has_child] query does not support [" + currentFieldName + "]");
                }
            }
        }
        if (!queryFound) {
-            throw new QueryParsingException(parseContext.index(), "[has_child] requires 'query' field");
+            throw new QueryParsingException(parseContext, "[has_child] requires 'query' field");
        }
        if (childType == null) {
-            throw new QueryParsingException(parseContext.index(), "[has_child] requires 'type' field");
+            throw new QueryParsingException(parseContext, "[has_child] requires 'type' field");
        }
 
        Query innerQuery = iq.asQuery(childType);
@@ -135,10 +131,10 @@ public class HasChildQueryParser implements QueryParser {
 
        DocumentMapper childDocMapper = parseContext.mapperService().documentMapper(childType);
        if (childDocMapper == null) {
-            throw new QueryParsingException(parseContext.index(), "[has_child] No mapping for for type [" + childType + "]");
+            throw new QueryParsingException(parseContext, "[has_child] No mapping for type [" + childType + "]");
        }
        if (!childDocMapper.parentFieldMapper().active()) {
-            throw new QueryParsingException(parseContext.index(), "[has_child] Type [" + childType + "] does not have parent mapping");
+            throw new QueryParsingException(parseContext, "[has_child] Type [" + childType + "] does not have parent mapping");
        }
 
        if (innerHits != null) {
@@ -149,30 +145,30 @@ public class HasChildQueryParser implements QueryParser {
 
        ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper();
        if (!parentFieldMapper.active()) {
-            throw new QueryParsingException(parseContext.index(), "[has_child] _parent field not configured");
+            throw new QueryParsingException(parseContext, "[has_child] _parent field not configured");
        }
 
        String parentType = parentFieldMapper.type();
        DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType);
        if (parentDocMapper == null) {
-            throw new QueryParsingException(parseContext.index(), "[has_child] Type [" + childType
-                    + "] points to a non existent parent type [" + parentType + "]");
+            throw new QueryParsingException(parseContext, "[has_child] Type [" + childType + "] points to a non existent parent type ["
+                    + parentType + "]");
        }
 
        if (maxChildren > 0 && maxChildren < minChildren) {
-            throw new QueryParsingException(parseContext.index(), "[has_child] 'max_children' is less than 'min_children'");
+            throw new QueryParsingException(parseContext, "[has_child] 'max_children' is less than 'min_children'");
        }
 
        BitDocIdSetFilter nonNestedDocsFilter = null;
        if (parentDocMapper.hasNestedObjects()) {
-            nonNestedDocsFilter = parseContext.bitsetFilter(NonNestedDocsFilter.INSTANCE);
+            nonNestedDocsFilter = parseContext.bitsetFilter(Queries.newNonNestedFilter());
        }
 
        // wrap the query with type query
-        innerQuery = new FilteredQuery(innerQuery, parseContext.cacheFilter(childDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy()));
+        innerQuery = new FilteredQuery(innerQuery, childDocMapper.typeFilter());
 
        Query query;
-        Filter parentFilter = parseContext.cacheFilter(parentDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy());
+        Filter parentFilter = parentDocMapper.typeFilter();
        ParentChildIndexFieldData parentChildIndexFieldData = parseContext.getForField(parentFieldMapper);
        if (minChildren > 1 || maxChildren > 0 || scoreType != ScoreType.NONE) {
            query = new ChildrenQuery(parentChildIndexFieldData, parentType, childType, parentFilter, innerQuery, scoreType, minChildren,
@@ -182,7 +178,7 @@ public class HasChildQueryParser implements QueryParser {
                    shortCircuitParentDocSet, nonNestedDocsFilter);
        }
        if (queryName != null) {
-            parseContext.addNamedFilter(queryName, new CustomQueryWrappingFilter(query));
+            parseContext.addNamedQuery(queryName, query);
        }
        query.setBoost(boost);
        return query;
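The two has_child variants now diverge in how they register a name. In filter context the parsed child query must still be exposed as a Filter, which Lucene's stock QueryWrapperFilter provides; in query context the Query can be registered directly through the new addNamedQuery(...). Side by side, as a sketch:

    if (filterName != null) {
        // filter context: the query still has to be adapted to the Filter API
        parseContext.addNamedFilter(filterName, new QueryWrapperFilter(childrenQuery));
    }
    if (queryName != null) {
        // query context: no wrapping needed anymore
        parseContext.addNamedQuery(queryName, query);
    }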
diff --git a/src/main/java/org/elasticsearch/index/query/HasParentFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/HasParentFilterBuilder.java
index 62a96debf8c..8f565022c4f 100644
--- a/src/main/java/org/elasticsearch/index/query/HasParentFilterBuilder.java
+++ b/src/main/java/org/elasticsearch/index/query/HasParentFilterBuilder.java
@@ -62,20 +62,6 @@ public class HasParentFilterBuilder extends BaseFilterBuilder {
         return this;
     }
 
-    /**
-     * This is a noop since has_parent can't be cached.
-     */
-    public HasParentFilterBuilder cache(boolean cache) {
-        return this;
-    }
-
-    /**
-     * This is a noop since has_parent can't be cached.
-     */
-    public HasParentFilterBuilder cacheKey(String cacheKey) {
-        return this;
-    }
-
     /**
      * Sets inner hit definition in the scope of this filter and reusing the defined type and query.
      */
diff --git a/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java b/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java
index 2f549c2f674..331f575df77 100644
--- a/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java
+++ b/src/main/java/org/elasticsearch/index/query/HasParentFilterParser.java
@@ -20,19 +20,18 @@
 package org.elasticsearch.index.query;
 
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper;
 import org.elasticsearch.index.query.support.XContentStructure;
-import org.elasticsearch.index.search.child.CustomQueryWrappingFilter;
 import org.elasticsearch.search.internal.SubSearchContext;
 
 import java.io.IOException;
 
 import static org.elasticsearch.index.query.HasParentQueryParser.createParentQuery;
-import static org.elasticsearch.index.query.QueryParserUtils.ensureNotDeleteByQuery;
 
 /**
  *
@@ -55,7 +54,6 @@ public class HasParentFilterParser implements FilterParser {
 
     @Override
     public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException {
-        ensureNotDeleteByQuery(NAME, parseContext);
         XContentParser parser = parseContext.parser();
 
         boolean queryFound = false;
@@ -71,6 +69,8 @@ public class HasParentFilterParser implements FilterParser {
         while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
             if (token == XContentParser.Token.FIELD_NAME) {
                 currentFieldName = parser.currentName();
+            } else if (parseContext.isDeprecatedSetting(currentFieldName)) {
+                // skip
             } else if (token == XContentParser.Token.START_OBJECT) {
                 // Usually, the query would be parsed here, but the child
                 // type may not have been extracted yet, so use the
@@ -85,27 +85,23 @@ public class HasParentFilterParser implements FilterParser {
                 } else if ("inner_hits".equals(currentFieldName)) {
                     innerHits = innerHitsQueryParserHelper.parse(parseContext);
                 } else {
-                    throw new QueryParsingException(parseContext.index(), "[has_parent] filter does not support [" + currentFieldName + "]");
+                    throw new QueryParsingException(parseContext, "[has_parent] filter does not support [" + currentFieldName + "]");
                 }
             } else if (token.isValue()) {
                 if ("type".equals(currentFieldName) || "parent_type".equals(currentFieldName) || "parentType".equals(currentFieldName)) {
                     parentType = parser.text();
                 } else if ("_name".equals(currentFieldName)) {
                     filterName = parser.text();
-                } else if ("_cache".equals(currentFieldName)) {
-                    // noop to be backwards compatible
-                } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) {
-                    // noop to be backwards compatible
                 } else {
-                    throw new QueryParsingException(parseContext.index(), "[has_parent] filter does not support [" + currentFieldName + "]");
+                    throw new QueryParsingException(parseContext, "[has_parent] filter does not support [" + currentFieldName + "]");
                 }
             }
         }
&& !filterFound) { - throw new QueryParsingException(parseContext.index(), "[has_parent] filter requires 'query' or 'filter' field"); + throw new QueryParsingException(parseContext, "[has_parent] filter requires 'query' or 'filter' field"); } if (parentType == null) { - throw new QueryParsingException(parseContext.index(), "[has_parent] filter requires 'parent_type' field"); + throw new QueryParsingException(parseContext, "[has_parent] filter requires 'parent_type' field"); } Query innerQuery; @@ -124,9 +120,9 @@ public class HasParentFilterParser implements FilterParser { return null; } if (filterName != null) { - parseContext.addNamedFilter(filterName, new CustomQueryWrappingFilter(parentQuery)); + parseContext.addNamedFilter(filterName, new QueryWrapperFilter(parentQuery)); } - return new CustomQueryWrappingFilter(parentQuery); + return new QueryWrapperFilter(parentQuery); } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java b/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java index 2325d2840e8..0d718efdb69 100644 --- a/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/HasParentQueryParser.java @@ -34,7 +34,6 @@ import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.internal.ParentFieldMapper; import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper; import org.elasticsearch.index.query.support.XContentStructure; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.search.child.ParentConstantScoreQuery; import org.elasticsearch.index.search.child.ParentQuery; import org.elasticsearch.search.fetch.innerhits.InnerHitsContext; @@ -44,8 +43,6 @@ import java.io.IOException; import java.util.HashSet; import java.util.Set; -import static org.elasticsearch.index.query.QueryParserUtils.ensureNotDeleteByQuery; - public class HasParentQueryParser implements QueryParser { public static final String NAME = "has_parent"; @@ -64,7 +61,6 @@ public class HasParentQueryParser implements QueryParser { @Override public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException { - ensureNotDeleteByQuery(NAME, parseContext); XContentParser parser = parseContext.parser(); boolean queryFound = false; @@ -91,7 +87,7 @@ public class HasParentQueryParser implements QueryParser { } else if ("inner_hits".equals(currentFieldName)) { innerHits = innerHitsQueryParserHelper.parse(parseContext); } else { - throw new QueryParsingException(parseContext.index(), "[has_parent] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[has_parent] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("type".equals(currentFieldName) || "parent_type".equals(currentFieldName) || "parentType".equals(currentFieldName)) { @@ -115,15 +111,15 @@ public class HasParentQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[has_parent] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[has_parent] query does not support [" + currentFieldName + "]"); } } } if (!queryFound) { - throw new QueryParsingException(parseContext.index(), "[has_parent] query requires 'query' field"); + throw new 
QueryParsingException(parseContext, "[has_parent] query requires 'query' field"); } if (parentType == null) { - throw new QueryParsingException(parseContext.index(), "[has_parent] query requires 'parent_type' field"); + throw new QueryParsingException(parseContext, "[has_parent] query requires 'parent_type' field"); } Query innerQuery = iq.asQuery(parentType); @@ -140,7 +136,7 @@ public class HasParentQueryParser implements QueryParser { query.setBoost(boost); if (queryName != null) { - parseContext.addNamedFilter(queryName, new CustomQueryWrappingFilter(query)); + parseContext.addNamedQuery(queryName, query); } return query; } @@ -148,7 +144,8 @@ public class HasParentQueryParser implements QueryParser { static Query createParentQuery(Query innerQuery, String parentType, boolean score, QueryParseContext parseContext, Tuple innerHits) { DocumentMapper parentDocMapper = parseContext.mapperService().documentMapper(parentType); if (parentDocMapper == null) { - throw new QueryParsingException(parseContext.index(), "[has_parent] query configured 'parent_type' [" + parentType + "] is not a valid type"); + throw new QueryParsingException(parseContext, "[has_parent] query configured 'parent_type' [" + parentType + + "] is not a valid type"); } if (innerHits != null) { @@ -172,7 +169,7 @@ public class HasParentQueryParser implements QueryParser { } } if (parentChildIndexFieldData == null) { - throw new QueryParsingException(parseContext.index(), "[has_parent] no _parent field configured"); + throw new QueryParsingException(parseContext, "[has_parent] no _parent field configured"); } Filter parentFilter = null; @@ -189,7 +186,7 @@ public class HasParentQueryParser implements QueryParser { parentsFilter.add(documentMapper.typeFilter(), BooleanClause.Occur.SHOULD); } } - parentFilter = Queries.wrap(parentsFilter); + parentFilter = new QueryWrapperFilter(parentsFilter); } if (parentFilter == null) { @@ -197,8 +194,8 @@ public class HasParentQueryParser implements QueryParser { } // wrap the query with type query - innerQuery = new FilteredQuery(innerQuery, parseContext.cacheFilter(parentDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy())); - Filter childrenFilter = parseContext.cacheFilter(Queries.wrap(Queries.not(parentFilter)), null, parseContext.autoFilterCachePolicy()); + innerQuery = new FilteredQuery(innerQuery, parentDocMapper.typeFilter()); + Filter childrenFilter = new QueryWrapperFilter(Queries.not(parentFilter)); if (score) { return new ParentQuery(parentChildIndexFieldData, innerQuery, parentDocMapper.type(), childrenFilter); } else { @@ -206,4 +203,4 @@ public class HasParentQueryParser implements QueryParser { } } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java b/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java index d0402aabf95..23d4c9c1483 100644 --- a/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/IdsFilterParser.java @@ -24,6 +24,7 @@ import com.google.common.collect.Iterables; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.search.Queries; @@ -68,7 +69,7 @@ public class IdsFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { BytesRef value = 
                        BytesRef value = parser.utf8BytesOrNull();
                        if (value == null) {
-                            throw new QueryParsingException(parseContext.index(), "No value specified for term filter");
+                            throw new QueryParsingException(parseContext, "No value specified for term filter");
                        }
                        ids.add(value);
                    }
@@ -77,12 +78,12 @@
                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
                        String value = parser.textOrNull();
                        if (value == null) {
-                            throw new QueryParsingException(parseContext.index(), "No type specified for term filter");
+                            throw new QueryParsingException(parseContext, "No type specified for term filter");
                        }
                        types.add(value);
                    }
                } else {
-                    throw new QueryParsingException(parseContext.index(), "[ids] filter does not support [" + currentFieldName + "]");
+                    throw new QueryParsingException(parseContext, "[ids] filter does not support [" + currentFieldName + "]");
                }
            } else if (token.isValue()) {
                if ("type".equals(currentFieldName) || "_type".equals(currentFieldName)) {
@@ -90,13 +91,13 @@
                } else if ("_name".equals(currentFieldName)) {
                    filterName = parser.text();
                } else {
-                    throw new QueryParsingException(parseContext.index(), "[ids] filter does not support [" + currentFieldName + "]");
+                    throw new QueryParsingException(parseContext, "[ids] filter does not support [" + currentFieldName + "]");
                }
            }
        }
 
        if (!idsProvided) {
-            throw new QueryParsingException(parseContext.index(), "[ids] filter requires providing a values element");
+            throw new QueryParsingException(parseContext, "[ids] filter requires providing a values element");
        }
 
        if (ids.isEmpty()) {
@@ -109,7 +110,7 @@
            types = parseContext.mapperService().types();
        }
 
-        Filter filter = Queries.wrap(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(types, ids)));
+        Filter filter = new QueryWrapperFilter(new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(types, ids)));
        if (filterName != null) {
            parseContext.addNamedFilter(filterName, filter);
        }
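The ids filter is materialized by encoding every (type, id) pair as a _uid term; the only change here is that the resulting TermsQuery is adapted to the Filter API with QueryWrapperFilter instead of the removed Queries.wrap(...). In short:

    // one _uid term per (type, id) combination, wrapped once for the Filter API
    Filter filter = new QueryWrapperFilter(
            new TermsQuery(UidFieldMapper.NAME, Uid.createTypeUids(types, ids)));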
QueryParsingException(parseContext.index(), "[ids] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[ids] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("type".equals(currentFieldName) || "_type".equals(currentFieldName)) { @@ -102,13 +102,13 @@ public class IdsQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[ids] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[ids] query does not support [" + currentFieldName + "]"); } } } if (!idsProvided) { - throw new QueryParsingException(parseContext.index(), "[ids] query, no ids values provided"); + throw new QueryParsingException(parseContext, "[ids] query, no ids values provided"); } if (ids.isEmpty()) { diff --git a/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java b/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java index 348e9cacba1..4d04e8e675b 100644 --- a/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java +++ b/src/main/java/org/elasticsearch/index/query/IndexQueryParserService.java @@ -23,7 +23,6 @@ import com.google.common.collect.ImmutableMap; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.util.CloseableThreadLocal; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; @@ -94,8 +93,6 @@ public class IndexQueryParserService extends AbstractIndexComponent { final BitsetFilterCache bitsetFilterCache; - final QueryCachingPolicy autoFilterCachePolicy; - private final Map queryParsers; private final Map filterParsers; @@ -111,7 +108,6 @@ public class IndexQueryParserService extends AbstractIndexComponent { ScriptService scriptService, AnalysisService analysisService, MapperService mapperService, IndexCache indexCache, IndexFieldDataService fieldDataService, BitsetFilterCache bitsetFilterCache, - QueryCachingPolicy autoFilterCachePolicy, @Nullable SimilarityService similarityService, @Nullable Map namedQueryParsers, @Nullable Map namedFilterParsers) { @@ -123,7 +119,6 @@ public class IndexQueryParserService extends AbstractIndexComponent { this.indexCache = indexCache; this.fieldDataService = fieldDataService; this.bitsetFilterCache = bitsetFilterCache; - this.autoFilterCachePolicy = autoFilterCachePolicy; this.defaultField = indexSettings.get(DEFAULT_FIELD, AllFieldMapper.NAME); this.queryStringLenient = indexSettings.getAsBoolean(QUERY_STRING_LENIENT, false); @@ -185,10 +180,6 @@ public class IndexQueryParserService extends AbstractIndexComponent { return this.defaultField; } - public QueryCachingPolicy autoFilterCachePolicy() { - return autoFilterCachePolicy; - } - public boolean queryStringLenient() { return this.queryStringLenient; } @@ -201,7 +192,7 @@ public class IndexQueryParserService extends AbstractIndexComponent { return filterParsers.get(name); } - public ParsedQuery parse(QueryBuilder queryBuilder) throws ElasticsearchException { + public ParsedQuery parse(QueryBuilder queryBuilder) { XContentParser parser = null; try { BytesReference bytes = queryBuilder.buildAsBytes(); @@ -210,7 +201,7 @@ public class IndexQueryParserService extends AbstractIndexComponent { } catch (QueryParsingException e) { throw e; } catch (Exception e) { - throw new 
QueryParsingException(index, "Failed to parse", e); + throw new QueryParsingException(getParseContext(), "Failed to parse", e); } finally { if (parser != null) { parser.close(); @@ -218,11 +209,11 @@ public class IndexQueryParserService extends AbstractIndexComponent { } } - public ParsedQuery parse(byte[] source) throws ElasticsearchException { + public ParsedQuery parse(byte[] source) { return parse(source, 0, source.length); } - public ParsedQuery parse(byte[] source, int offset, int length) throws ElasticsearchException { + public ParsedQuery parse(byte[] source, int offset, int length) { XContentParser parser = null; try { parser = XContentFactory.xContent(source, offset, length).createParser(source, offset, length); @@ -230,7 +221,7 @@ public class IndexQueryParserService extends AbstractIndexComponent { } catch (QueryParsingException e) { throw e; } catch (Exception e) { - throw new QueryParsingException(index, "Failed to parse", e); + throw new QueryParsingException(getParseContext(), "Failed to parse", e); } finally { if (parser != null) { parser.close(); @@ -238,11 +229,11 @@ public class IndexQueryParserService extends AbstractIndexComponent { } } - public ParsedQuery parse(BytesReference source) throws ElasticsearchException { + public ParsedQuery parse(BytesReference source) { return parse(cache.get(), source); } - public ParsedQuery parse(QueryParseContext context, BytesReference source) throws ElasticsearchException { + public ParsedQuery parse(QueryParseContext context, BytesReference source) { XContentParser parser = null; try { parser = XContentFactory.xContent(source).createParser(source); @@ -250,7 +241,7 @@ public class IndexQueryParserService extends AbstractIndexComponent { } catch (QueryParsingException e) { throw e; } catch (Exception e) { - throw new QueryParsingException(index, "Failed to parse", e); + throw new QueryParsingException(context, "Failed to parse", e); } finally { if (parser != null) { parser.close(); @@ -266,7 +257,7 @@ public class IndexQueryParserService extends AbstractIndexComponent { } catch (QueryParsingException e) { throw e; } catch (Exception e) { - throw new QueryParsingException(index, "Failed to parse [" + source + "]", e); + throw new QueryParsingException(getParseContext(), "Failed to parse [" + source + "]", e); } finally { if (parser != null) { parser.close(); @@ -282,7 +273,7 @@ public class IndexQueryParserService extends AbstractIndexComponent { try { return innerParse(context, parser); } catch (IOException e) { - throw new QueryParsingException(index, "Failed to parse", e); + throw new QueryParsingException(context, "Failed to parse", e); } } @@ -359,7 +350,7 @@ public class IndexQueryParserService extends AbstractIndexComponent { XContentParser qSourceParser = XContentFactory.xContent(querySource).createParser(querySource); parsedQuery = parse(qSourceParser); } else { - throw new QueryParsingException(index(), "request does not support [" + fieldName + "]"); + throw new QueryParsingException(getParseContext(), "request does not support [" + fieldName + "]"); } } } @@ -369,10 +360,10 @@ public class IndexQueryParserService extends AbstractIndexComponent { } catch (QueryParsingException e) { throw e; } catch (Throwable e) { - throw new QueryParsingException(index, "Failed to parse", e); + throw new QueryParsingException(getParseContext(), "Failed to parse", e); } - throw new QueryParsingException(index(), "Required query is missing"); + throw new QueryParsingException(getParseContext(), "Required query is missing"); } private 
ParsedQuery innerParse(QueryParseContext parseContext, XContentParser parser) throws IOException, QueryParsingException { diff --git a/src/main/java/org/elasticsearch/index/query/IndicesFilterParser.java b/src/main/java/org/elasticsearch/index/query/IndicesFilterParser.java index c1f5b804f94..7bd39dad947 100644 --- a/src/main/java/org/elasticsearch/index/query/IndicesFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/IndicesFilterParser.java @@ -83,30 +83,30 @@ public class IndicesFilterParser implements FilterParser { noMatchFilter = parseContext.parseInnerFilter(); } } else { - throw new QueryParsingException(parseContext.index(), "[indices] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[indices] filter does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { if ("indices".equals(currentFieldName)) { if (indicesFound) { - throw new QueryParsingException(parseContext.index(), "[indices] indices or index already specified"); + throw new QueryParsingException(parseContext, "[indices] indices or index already specified"); } indicesFound = true; Collection<String> indices = new ArrayList<>(); while (parser.nextToken() != XContentParser.Token.END_ARRAY) { String value = parser.textOrNull(); if (value == null) { - throw new QueryParsingException(parseContext.index(), "[indices] no value specified for 'indices' entry"); + throw new QueryParsingException(parseContext, "[indices] no value specified for 'indices' entry"); } indices.add(value); } currentIndexMatchesIndices = matchesIndices(parseContext.index().name(), indices.toArray(new String[indices.size()])); } else { - throw new QueryParsingException(parseContext.index(), "[indices] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[indices] filter does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("index".equals(currentFieldName)) { if (indicesFound) { - throw new QueryParsingException(parseContext.index(), "[indices] indices or index already specified"); + throw new QueryParsingException(parseContext, "[indices] indices or index already specified"); } indicesFound = true; currentIndexMatchesIndices = matchesIndices(parseContext.index().name(), parser.text()); @@ -120,15 +120,15 @@ public class IndicesFilterParser implements FilterParser { } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[indices] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[indices] filter does not support [" + currentFieldName + "]"); } } } if (!filterFound) { - throw new QueryParsingException(parseContext.index(), "[indices] requires 'filter' element"); + throw new QueryParsingException(parseContext, "[indices] requires 'filter' element"); } if (!indicesFound) { - throw new QueryParsingException(parseContext.index(), "[indices] requires 'indices' or 'index' element"); + throw new QueryParsingException(parseContext, "[indices] requires 'indices' or 'index' element"); } Filter chosenFilter; diff --git a/src/main/java/org/elasticsearch/index/query/IndicesQueryParser.java b/src/main/java/org/elasticsearch/index/query/IndicesQueryParser.java index d5b5cefa149..a45fe9f88f6 100644 --- a/src/main/java/org/elasticsearch/index/query/IndicesQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/IndicesQueryParser.java @@ -76,30
+76,30 @@ public class IndicesQueryParser implements QueryParser { } else if ("no_match_query".equals(currentFieldName)) { innerNoMatchQuery = new XContentStructure.InnerQuery(parseContext, null); } else { - throw new QueryParsingException(parseContext.index(), "[indices] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[indices] query does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { if ("indices".equals(currentFieldName)) { if (indicesFound) { - throw new QueryParsingException(parseContext.index(), "[indices] indices or index already specified"); + throw new QueryParsingException(parseContext, "[indices] indices or index already specified"); } indicesFound = true; Collection<String> indices = new ArrayList<>(); while (parser.nextToken() != XContentParser.Token.END_ARRAY) { String value = parser.textOrNull(); if (value == null) { - throw new QueryParsingException(parseContext.index(), "[indices] no value specified for 'indices' entry"); + throw new QueryParsingException(parseContext, "[indices] no value specified for 'indices' entry"); } indices.add(value); } currentIndexMatchesIndices = matchesIndices(parseContext.index().name(), indices.toArray(new String[indices.size()])); } else { - throw new QueryParsingException(parseContext.index(), "[indices] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[indices] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("index".equals(currentFieldName)) { if (indicesFound) { - throw new QueryParsingException(parseContext.index(), "[indices] indices or index already specified"); + throw new QueryParsingException(parseContext, "[indices] indices or index already specified"); } indicesFound = true; currentIndexMatchesIndices = matchesIndices(parseContext.index().name(), parser.text()); @@ -113,15 +113,15 @@ public class IndicesQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[indices] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[indices] query does not support [" + currentFieldName + "]"); } } } if (!queryFound) { - throw new QueryParsingException(parseContext.index(), "[indices] requires 'query' element"); + throw new QueryParsingException(parseContext, "[indices] requires 'query' element"); } if (!indicesFound) { - throw new QueryParsingException(parseContext.index(), "[indices] requires 'indices' or 'index' element"); + throw new QueryParsingException(parseContext, "[indices] requires 'indices' or 'index' element"); } Query chosenQuery; diff --git a/src/main/java/org/elasticsearch/index/query/LimitFilterParser.java b/src/main/java/org/elasticsearch/index/query/LimitFilterParser.java index 858b23c6693..f4f8fde7427 100644 --- a/src/main/java/org/elasticsearch/index/query/LimitFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/LimitFilterParser.java @@ -53,13 +53,13 @@ public class LimitFilterParser implements FilterParser { if ("value".equals(currentFieldName)) { limit = parser.intValue(); } else { - throw new QueryParsingException(parseContext.index(), "[limit] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[limit] filter does not support [" + currentFieldName + "]"); } } } if (limit == -1) { - throw new
QueryParsingException(parseContext.index(), "No value specified for limit filter"); + throw new QueryParsingException(parseContext, "No value specified for limit filter"); } // this filter is deprecated and parses to a filter that matches everything diff --git a/src/main/java/org/elasticsearch/index/query/MatchAllQueryParser.java b/src/main/java/org/elasticsearch/index/query/MatchAllQueryParser.java index 2017b940921..933d3d35631 100644 --- a/src/main/java/org/elasticsearch/index/query/MatchAllQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/MatchAllQueryParser.java @@ -59,7 +59,7 @@ public class MatchAllQueryParser implements QueryParser { if ("boost".equals(currentFieldName)) { boost = parser.floatValue(); } else { - throw new QueryParsingException(parseContext.index(), "[match_all] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[match_all] query does not support [" + currentFieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java b/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java index a0f595a6626..8dd35c84b4d 100644 --- a/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/MatchQueryParser.java @@ -65,7 +65,7 @@ public class MatchQueryParser implements QueryParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[match] query malformed, no field"); + throw new QueryParsingException(parseContext, "[match] query malformed, no field"); } String fieldName = parser.currentName(); @@ -93,12 +93,12 @@ public class MatchQueryParser implements QueryParser { } else if ("phrase_prefix".equals(tStr) || "phrasePrefix".equals(currentFieldName)) { type = MatchQuery.Type.PHRASE_PREFIX; } else { - throw new QueryParsingException(parseContext.index(), "[match] query does not support type " + tStr); + throw new QueryParsingException(parseContext, "[match] query does not support type " + tStr); } } else if ("analyzer".equals(currentFieldName)) { String analyzer = parser.text(); if (parseContext.analysisService().analyzer(analyzer) == null) { - throw new QueryParsingException(parseContext.index(), "[match] analyzer [" + parser.text() + "] not found"); + throw new QueryParsingException(parseContext, "[match] analyzer [" + parser.text() + "] not found"); } matchQuery.setAnalyzer(analyzer); } else if ("boost".equals(currentFieldName)) { @@ -118,7 +118,8 @@ public class MatchQueryParser implements QueryParser { } else if ("and".equalsIgnoreCase(op)) { matchQuery.setOccur(BooleanClause.Occur.MUST); } else { - throw new QueryParsingException(parseContext.index(), "text query requires operator to be either 'and' or 'or', not [" + op + "]"); + throw new QueryParsingException(parseContext, "text query requires operator to be either 'and' or 'or', not [" + + op + "]"); } } else if ("minimum_should_match".equals(currentFieldName) || "minimumShouldMatch".equals(currentFieldName)) { minimumShouldMatch = parser.textOrNull(); @@ -139,12 +140,12 @@ public class MatchQueryParser implements QueryParser { } else if ("all".equalsIgnoreCase(zeroTermsDocs)) { matchQuery.setZeroTermsQuery(MatchQuery.ZeroTermsQuery.ALL); } else { - throw new QueryParsingException(parseContext.index(), "Unsupported zero_terms_docs value [" + zeroTermsDocs + "]"); + throw new QueryParsingException(parseContext, "Unsupported zero_terms_docs value 
[" + zeroTermsDocs + "]"); } } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[match] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[match] query does not support [" + currentFieldName + "]"); } } } @@ -154,12 +155,13 @@ public class MatchQueryParser implements QueryParser { // move to the next token token = parser.nextToken(); if (token != XContentParser.Token.END_OBJECT) { - throw new QueryParsingException(parseContext.index(), "[match] query parsed in simplified form, with direct field name, but included more options than just the field name, possibly use its 'options' form, with 'query' element?"); + throw new QueryParsingException(parseContext, + "[match] query parsed in simplified form, with direct field name, but included more options than just the field name, possibly use its 'options' form, with 'query' element?"); } } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No text specified for text query"); + throw new QueryParsingException(parseContext, "No text specified for text query"); } Query query = matchQuery.parse(type, fieldName, value); diff --git a/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java b/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java index 10f0405b832..07af9717dc1 100644 --- a/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/MissingFilterParser.java @@ -78,13 +78,13 @@ public class MissingFilterParser implements FilterParser { } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[missing] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[missing] filter does not support [" + currentFieldName + "]"); } } } if (fieldPattern == null) { - throw new QueryParsingException(parseContext.index(), "missing must be provided with a [field]"); + throw new QueryParsingException(parseContext, "missing must be provided with a [field]"); } return newFilter(parseContext, fieldPattern, existence, nullValue, filterName); @@ -92,7 +92,7 @@ public class MissingFilterParser implements FilterParser { public static Filter newFilter(QueryParseContext parseContext, String fieldPattern, boolean existence, boolean nullValue, String filterName) { if (!existence && !nullValue) { - throw new QueryParsingException(parseContext.index(), "missing must have either existence, or null_value, or both set to true"); + throw new QueryParsingException(parseContext, "missing must have either existence, or null_value, or both set to true"); } final FieldMappers fieldNamesMappers = parseContext.mapperService().fullName(FieldNamesFieldMapper.NAME); @@ -144,13 +144,8 @@ public class MissingFilterParser implements FilterParser { boolFilter.add(filter, BooleanClause.Occur.SHOULD); } - // we always cache this one, really does not change... (exists) - // its ok to cache under the fieldName cacheKey, since its per segment and the mapping applies to this data on this segment... 
- existenceFilter = Queries.wrap(boolFilter); - existenceFilter = parseContext.cacheFilter(existenceFilter, new HashedBytesRef("$exists$" + fieldPattern), parseContext.autoFilterCachePolicy()); - existenceFilter = Queries.wrap(Queries.not(existenceFilter)); - // cache the not filter as well, so it will be faster - existenceFilter = parseContext.cacheFilter(existenceFilter, new HashedBytesRef("$missing$" + fieldPattern), parseContext.autoFilterCachePolicy()); + existenceFilter = new QueryWrapperFilter(boolFilter); + existenceFilter = new QueryWrapperFilter(Queries.not(existenceFilter)); } if (nullValue) { @@ -158,10 +153,6 @@ MapperService.SmartNameFieldMappers smartNameFieldMappers = parseContext.smartFieldMappers(field); if (smartNameFieldMappers != null && smartNameFieldMappers.hasMapper()) { nullFilter = smartNameFieldMappers.mapper().nullValueFilter(); - if (nullFilter != null) { - // cache the not filter as well, so it will be faster - nullFilter = parseContext.cacheFilter(nullFilter, new HashedBytesRef("$null$" + fieldPattern), parseContext.autoFilterCachePolicy()); - } } } } @@ -173,7 +164,7 @@ combined.add(existenceFilter, BooleanClause.Occur.SHOULD); combined.add(nullFilter, BooleanClause.Occur.SHOULD); // cache the not filter as well, so it will be faster - filter = parseContext.cacheFilter(Queries.wrap(combined), null, parseContext.autoFilterCachePolicy()); + filter = new QueryWrapperFilter(combined); } else { filter = nullFilter; } diff --git a/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java index ea814e6a15f..a19376acfcf 100644 --- a/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.get.MultiGetRequest; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; @@ -383,7 +382,7 @@ public class MoreLikeThisQueryBuilder extends BaseQueryBuilder implements Boosta builder.endArray(); } if (this.docs.isEmpty()) { - throw new ElasticsearchIllegalArgumentException("more_like_this requires '" + likeFieldName + "' to be provided"); + throw new IllegalArgumentException("more_like_this requires '" + likeFieldName + "' to be provided"); } else { builder.field(likeFieldName, docs); } diff --git a/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryParser.java b/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryParser.java index 9ef53961a9e..b726d4f0159 100644 --- a/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryParser.java @@ -28,7 +28,6 @@ import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; import org.elasticsearch.action.termvectors.TermVectorsRequest; @@ -156,7 +155,7 @@ public class MoreLikeThisQueryParser implements QueryParser { } else if
(Fields.INCLUDE.match(currentFieldName, parseContext.parseFlags())) { include = parser.booleanValue(); } else { - throw new QueryParsingException(parseContext.index(), "[mlt] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[mlt] query does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_ARRAY) { if (Fields.STOP_WORDS.match(currentFieldName, parseContext.parseFlags())) { @@ -173,14 +172,14 @@ public class MoreLikeThisQueryParser implements QueryParser { } else if (Fields.DOCUMENT_IDS.match(currentFieldName, parseContext.parseFlags())) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (!token.isValue()) { - throw new ElasticsearchIllegalArgumentException("ids array element should only contain ids"); + throw new IllegalArgumentException("ids array element should only contain ids"); } likeItems.add(newTermVectorsRequest().id(parser.text())); } } else if (Fields.DOCUMENTS.match(currentFieldName, parseContext.parseFlags())) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("docs array element should include an object"); + throw new IllegalArgumentException("docs array element should include an object"); } likeItems.add(parseDocument(parser)); } @@ -193,7 +192,7 @@ public class MoreLikeThisQueryParser implements QueryParser { parseLikeField(parser, ignoreTexts, ignoreItems); } } else { - throw new QueryParsingException(parseContext.index(), "[mlt] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[mlt] query does not support [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_OBJECT) { if (Fields.LIKE.match(currentFieldName, parseContext.parseFlags())) { @@ -202,16 +201,16 @@ public class MoreLikeThisQueryParser implements QueryParser { else if (Fields.IGNORE_LIKE.match(currentFieldName, parseContext.parseFlags())) { parseLikeField(parser, ignoreTexts, ignoreItems); } else { - throw new QueryParsingException(parseContext.index(), "[mlt] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[mlt] query does not support [" + currentFieldName + "]"); } } } if (likeTexts.isEmpty() && likeItems.isEmpty()) { - throw new QueryParsingException(parseContext.index(), "more_like_this requires 'like' to be specified"); + throw new QueryParsingException(parseContext, "more_like_this requires 'like' to be specified"); } if (moreLikeFields != null && moreLikeFields.isEmpty()) { - throw new QueryParsingException(parseContext.index(), "more_like_this requires 'fields' to be non-empty"); + throw new QueryParsingException(parseContext, "more_like_this requires 'fields' to be non-empty"); } // set analyzer @@ -259,8 +258,9 @@ public class MoreLikeThisQueryParser implements QueryParser { } if (item.type() == null) { if (parseContext.queryTypes().size() > 1) { - throw new QueryParsingException(parseContext.index(), - "ambiguous type for item with id: " + item.id() + " and index: " + item.index()); + throw new QueryParsingException(parseContext, + "ambiguous type for item with id: " + item.id() + + " and index: " + item.index()); } else { item.type(parseContext.queryTypes().iterator().next()); } @@ -313,7 +313,7 @@ public class MoreLikeThisQueryParser implements QueryParser { } else if (parser.currentToken() == XContentParser.Token.START_OBJECT) { 
items.add(parseDocument(parser)); } else { - throw new ElasticsearchIllegalArgumentException("Content of 'like' parameter should either be a string or an object"); + throw new IllegalArgumentException("Content of 'like' parameter should either be a string or an object"); } } @@ -331,7 +331,7 @@ public class MoreLikeThisQueryParser implements QueryParser { final String fieldName = it.next(); if (!Analysis.generatesCharacterTokenStream(analyzer, fieldName)) { if (failOnUnsupportedField) { - throw new ElasticsearchIllegalArgumentException("more_like_this doesn't support binary/numeric fields: [" + fieldName + "]"); + throw new IllegalArgumentException("more_like_this doesn't support binary/numeric fields: [" + fieldName + "]"); } else { it.remove(); } @@ -354,4 +354,4 @@ public class MoreLikeThisQueryParser implements QueryParser { boolQuery.add(query, BooleanClause.Occur.MUST_NOT); } } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/query/MultiMatchQueryParser.java b/src/main/java/org/elasticsearch/index/query/MultiMatchQueryParser.java index 3fbd43651de..976dd15dc7b 100644 --- a/src/main/java/org/elasticsearch/index/query/MultiMatchQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/MultiMatchQueryParser.java @@ -20,6 +20,7 @@ package org.elasticsearch.index.query; import com.google.common.collect.Maps; + import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.Query; import org.elasticsearch.common.inject.Inject; @@ -77,8 +78,7 @@ public class MultiMatchQueryParser implements QueryParser { } else if (token.isValue()) { extractFieldAndBoost(parseContext, parser, fieldNameWithBoosts); } else { - throw new QueryParsingException(parseContext.index(), "[" + NAME + "] query does not support [" + currentFieldName - + "]"); + throw new QueryParsingException(parseContext, "[" + NAME + "] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("query".equals(currentFieldName)) { @@ -88,7 +88,7 @@ public class MultiMatchQueryParser implements QueryParser { } else if ("analyzer".equals(currentFieldName)) { String analyzer = parser.text(); if (parseContext.analysisService().analyzer(analyzer) == null) { - throw new QueryParsingException(parseContext.index(), "["+ NAME +"] analyzer [" + parser.text() + "] not found"); + throw new QueryParsingException(parseContext, "[" + NAME + "] analyzer [" + parser.text() + "] not found"); } multiMatchQuery.setAnalyzer(analyzer); } else if ("boost".equals(currentFieldName)) { @@ -108,7 +108,8 @@ public class MultiMatchQueryParser implements QueryParser { } else if ("and".equalsIgnoreCase(op)) { multiMatchQuery.setOccur(BooleanClause.Occur.MUST); } else { - throw new QueryParsingException(parseContext.index(), "text query requires operator to be either 'and' or 'or', not [" + op + "]"); + throw new QueryParsingException(parseContext, "text query requires operator to be either 'and' or 'or', not [" + op + + "]"); } } else if ("minimum_should_match".equals(currentFieldName) || "minimumShouldMatch".equals(currentFieldName)) { minimumShouldMatch = parser.textOrNull(); @@ -131,22 +132,22 @@ public class MultiMatchQueryParser implements QueryParser { } else if ("all".equalsIgnoreCase(zeroTermsDocs)) { multiMatchQuery.setZeroTermsQuery(MatchQuery.ZeroTermsQuery.ALL); } else { - throw new QueryParsingException(parseContext.index(), "Unsupported zero_terms_docs value [" + zeroTermsDocs + "]"); + throw new QueryParsingException(parseContext, "Unsupported zero_terms_docs 
value [" + zeroTermsDocs + "]"); } } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[match] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[match] query does not support [" + currentFieldName + "]"); } } } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No text specified for multi_match query"); + throw new QueryParsingException(parseContext, "No text specified for multi_match query"); } if (fieldNameWithBoosts.isEmpty()) { - throw new QueryParsingException(parseContext.index(), "No fields specified for multi_match query"); + throw new QueryParsingException(parseContext, "No fields specified for multi_match query"); } if (type == null) { type = MultiMatchQueryBuilder.Type.BEST_FIELDS; diff --git a/src/main/java/org/elasticsearch/index/query/NestedFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/NestedFilterBuilder.java index 0b15bbfcf14..c64c7ef327e 100644 --- a/src/main/java/org/elasticsearch/index/query/NestedFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/NestedFilterBuilder.java @@ -32,8 +32,6 @@ public class NestedFilterBuilder extends BaseFilterBuilder { private final String path; private Boolean join; - private Boolean cache; - private String cacheKey; private String filterName; private QueryInnerHitBuilder innerHit = null; @@ -55,19 +53,6 @@ public class NestedFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public NestedFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public NestedFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - /** * Sets the filter name for the filter that can be used when searching for matched_filters per hit. 
*/ @@ -101,12 +86,6 @@ public class NestedFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } if (innerHit != null) { builder.startObject("inner_hits"); builder.value(innerHit); diff --git a/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java b/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java index f6cad0a57e0..467705afca7 100644 --- a/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/NestedFilterParser.java @@ -20,12 +20,11 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.join.ToParentBlockJoinQuery; import org.elasticsearch.common.Strings; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.support.InnerHitsQueryParserHelper; @@ -53,8 +52,6 @@ public class NestedFilterParser implements FilterParser { final NestedQueryParser.ToBlockJoinQueryBuilder builder = new NestedQueryParser.ToBlockJoinQueryBuilder(parseContext); float boost = 1.0f; - boolean cache = false; - HashedBytesRef cacheKey = null; String filterName = null; String currentFieldName = null; @@ -62,6 +59,8 @@ public class NestedFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("query".equals(currentFieldName)) { builder.query(); @@ -70,7 +69,7 @@ public class NestedFilterParser implements FilterParser { } else if ("inner_hits".equals(currentFieldName)) { builder.setInnerHits(innerHitsQueryParserHelper.parse(parseContext)); } else { - throw new QueryParsingException(parseContext.index(), "[nested] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[nested] filter does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("path".equals(currentFieldName)) { @@ -79,12 +78,8 @@ public class NestedFilterParser implements FilterParser { boost = parser.floatValue(); } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parser.booleanValue(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[nested] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[nested] filter does not support [" + currentFieldName + "]"); } } } @@ -92,10 +87,7 @@ public class NestedFilterParser implements FilterParser { ToParentBlockJoinQuery joinQuery = builder.build(); if (joinQuery != null) { joinQuery.getChildQuery().setBoost(boost); - Filter nestedFilter = Queries.wrap(joinQuery, parseContext); - if (cache) { - nestedFilter = parseContext.cacheFilter(nestedFilter, 
cacheKey, parseContext.autoFilterCachePolicy()); - } + Filter nestedFilter = new QueryWrapperFilter(joinQuery); if (filterName != null) { parseContext.addNamedFilter(filterName, nestedFilter); } diff --git a/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java b/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java index 989388b79d4..ba9bcf07d46 100644 --- a/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/NestedQueryParser.java @@ -75,7 +75,7 @@ public class NestedQueryParser implements QueryParser { } else if ("inner_hits".equals(currentFieldName)) { builder.setInnerHits(innerHitsQueryParserHelper.parse(parseContext)); } else { - throw new QueryParsingException(parseContext.index(), "[nested] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[nested] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("path".equals(currentFieldName)) { @@ -93,12 +93,12 @@ public class NestedQueryParser implements QueryParser { } else if ("none".equals(sScoreMode)) { scoreMode = ScoreMode.None; } else { - throw new QueryParsingException(parseContext.index(), "illegal score_mode for nested query [" + sScoreMode + "]"); + throw new QueryParsingException(parseContext, "illegal score_mode for nested query [" + sScoreMode + "]"); } } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[nested] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[nested] query does not support [" + currentFieldName + "]"); } } } @@ -144,7 +144,7 @@ public class NestedQueryParser implements QueryParser { innerQuery = null; } } else { - throw new QueryParsingException(parseContext.index(), "[nested] requires either 'query' or 'filter' field"); + throw new QueryParsingException(parseContext, "[nested] requires either 'query' or 'filter' field"); } if (innerHits != null) { diff --git a/src/main/java/org/elasticsearch/index/query/NotFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/NotFilterBuilder.java index f835b025f9c..1c5ea06f077 100644 --- a/src/main/java/org/elasticsearch/index/query/NotFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/NotFilterBuilder.java @@ -32,22 +32,12 @@ public class NotFilterBuilder extends BaseFilterBuilder { private FilterBuilder filter; - private Boolean cache; - private String filterName; public NotFilterBuilder(FilterBuilder filter) { this.filter = filter; } - /** - * Should the filter be cached or not. Defaults to false. 
- */ - public NotFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - public NotFilterBuilder filterName(String filterName) { this.filterName = filterName; return this; @@ -58,9 +48,6 @@ public class NotFilterBuilder extends BaseFilterBuilder { builder.startObject(NotFilterParser.NAME); builder.field("filter"); filter.toXContent(builder, params); - if (cache != null) { - builder.field("_cache", cache); - } if (filterName != null) { builder.field("_name", filterName); } diff --git a/src/main/java/org/elasticsearch/index/query/NotFilterParser.java b/src/main/java/org/elasticsearch/index/query/NotFilterParser.java index db8adccc5dd..7ebf0fe82dc 100644 --- a/src/main/java/org/elasticsearch/index/query/NotFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/NotFilterParser.java @@ -22,7 +22,6 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; @@ -50,8 +49,6 @@ public class NotFilterParser implements FilterParser { Filter filter = null; boolean filterFound = false; - boolean cache = false; - HashedBytesRef cacheKey = null; String filterName = null; String currentFieldName = null; @@ -59,6 +56,8 @@ public class NotFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("filter".equals(currentFieldName)) { filter = parseContext.parseInnerFilter(); @@ -73,30 +72,23 @@ public class NotFilterParser implements FilterParser { // its the filter, and the name is the field filter = parseContext.parseInnerFilter(currentFieldName); } else if (token.isValue()) { - if ("_cache".equals(currentFieldName)) { - cache = parser.booleanValue(); - } else if ("_name".equals(currentFieldName)) { + if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[not] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[not] filter does not support [" + currentFieldName + "]"); } } } if (!filterFound) { - throw new QueryParsingException(parseContext.index(), "filter is required when using `not` filter"); + throw new QueryParsingException(parseContext, "filter is required when using `not` filter"); } if (filter == null) { return null; } - Filter notFilter = Queries.wrap(Queries.not(filter)); - if (cache) { - notFilter = parseContext.cacheFilter(notFilter, cacheKey, parseContext.autoFilterCachePolicy()); - } + Filter notFilter = new QueryWrapperFilter(Queries.not(filter)); if (filterName != null) { parseContext.addNamedFilter(filterName, notFilter); } diff --git a/src/main/java/org/elasticsearch/index/query/OrFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/OrFilterBuilder.java index 04d516b00b7..2feca66fc56 100644 --- a/src/main/java/org/elasticsearch/index/query/OrFilterBuilder.java +++ 
b/src/main/java/org/elasticsearch/index/query/OrFilterBuilder.java @@ -34,9 +34,6 @@ public class OrFilterBuilder extends BaseFilterBuilder { private ArrayList<FilterBuilder> filters = Lists.newArrayList(); - private Boolean cache; - private String cacheKey; - private String filterName; public OrFilterBuilder(FilterBuilder... filters) { @@ -53,19 +50,6 @@ return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public OrFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public OrFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - public OrFilterBuilder filterName(String filterName) { this.filterName = filterName; return this; @@ -79,12 +63,6 @@ filter.toXContent(builder, params); } builder.endArray(); - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } if (filterName != null) { builder.field("_name", filterName); } diff --git a/src/main/java/org/elasticsearch/index/query/OrFilterParser.java b/src/main/java/org/elasticsearch/index/query/OrFilterParser.java index 9c3ad615105..dae526563c6 100644 --- a/src/main/java/org/elasticsearch/index/query/OrFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/OrFilterParser.java @@ -22,10 +22,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -56,9 +54,6 @@ public class OrFilterParser implements FilterParser { ArrayList<Filter> filters = newArrayList(); boolean filtersFound = false; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; - String filterName = null; String currentFieldName = null; XContentParser.Token token = parser.currentToken(); @@ -93,21 +88,17 @@ } } } else if (token.isValue()) { - if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_name".equals(currentFieldName)) { + if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[or] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[or] filter does not support [" + currentFieldName + "]"); } } } } if (!filtersFound) { - throw new QueryParsingException(parseContext.index(), "[or] filter requires 'filters' to be set on it'"); + throw new QueryParsingException(parseContext, "[or] filter requires 'filters' to be set on it'"); } if (filters.isEmpty()) { @@ -119,10 +110,7 @@ for (Filter filter : filters) { boolQuery.add(filter, Occur.SHOULD); } - Filter filter = Queries.wrap(boolQuery); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey,
cache); - } + Filter filter = new QueryWrapperFilter(boolQuery); if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/PrefixFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/PrefixFilterBuilder.java index bb41e4f104a..d202fcc281f 100644 --- a/src/main/java/org/elasticsearch/index/query/PrefixFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/PrefixFilterBuilder.java @@ -35,9 +35,6 @@ public class PrefixFilterBuilder extends BaseFilterBuilder { private final String prefix; - private Boolean cache; - private String cacheKey; - private String filterName; /** @@ -60,19 +57,6 @@ public class PrefixFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public PrefixFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public PrefixFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override public void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(PrefixFilterParser.NAME); @@ -80,12 +64,6 @@ public class PrefixFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java b/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java index e6bc4e3437f..d1f291e4606 100644 --- a/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/PrefixFilterParser.java @@ -22,11 +22,9 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.Term; import org.apache.lucene.search.Filter; import org.apache.lucene.search.PrefixQuery; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MapperService; @@ -52,8 +50,6 @@ public class PrefixFilterParser implements FilterParser { public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String fieldName = null; Object value = null; @@ -66,10 +62,6 @@ public class PrefixFilterParser implements FilterParser { } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { fieldName = currentFieldName; value = parser.objectBytes(); @@ -78,7 +70,7 @@ public class PrefixFilterParser implements FilterParser { } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for prefix filter"); + throw new QueryParsingException(parseContext, "No 
value specified for prefix filter"); } Filter filter = null; @@ -88,12 +80,9 @@ public class PrefixFilterParser implements FilterParser { filter = smartNameFieldMappers.mapper().prefixFilter(value, parseContext); } if (filter == null) { - filter = Queries.wrap(new PrefixQuery(new Term(fieldName, BytesRefs.toBytesRef(value)))); + filter = new QueryWrapperFilter(new PrefixQuery(new Term(fieldName, BytesRefs.toBytesRef(value)))); } - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } diff --git a/src/main/java/org/elasticsearch/index/query/PrefixQueryParser.java b/src/main/java/org/elasticsearch/index/query/PrefixQueryParser.java index 0cecb0aa651..dc59007c461 100644 --- a/src/main/java/org/elasticsearch/index/query/PrefixQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/PrefixQueryParser.java @@ -53,7 +53,7 @@ public class PrefixQueryParser implements QueryParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[prefix] query malformed, no field"); + throw new QueryParsingException(parseContext, "[prefix] query malformed, no field"); } String fieldName = parser.currentName(); String rewriteMethod = null; @@ -80,7 +80,7 @@ public class PrefixQueryParser implements QueryParser { queryName = parser.text(); } } else { - throw new QueryParsingException(parseContext.index(), "[prefix] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[prefix] query does not support [" + currentFieldName + "]"); } } parser.nextToken(); @@ -90,7 +90,7 @@ public class PrefixQueryParser implements QueryParser { } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for prefix query"); + throw new QueryParsingException(parseContext, "No value specified for prefix query"); } MultiTermQuery.RewriteMethod method = QueryParsers.parseRewriteMethod(rewriteMethod, null); diff --git a/src/main/java/org/elasticsearch/index/query/QueryBuilder.java b/src/main/java/org/elasticsearch/index/query/QueryBuilder.java index c4d3588edce..87025de9bb6 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/QueryBuilder.java @@ -29,7 +29,7 @@ import org.elasticsearch.common.xcontent.XContentType; */ public interface QueryBuilder extends ToXContent { - BytesReference buildAsBytes() throws ElasticsearchException; + BytesReference buildAsBytes(); - BytesReference buildAsBytes(XContentType contentType) throws ElasticsearchException; + BytesReference buildAsBytes(XContentType contentType); } diff --git a/src/main/java/org/elasticsearch/index/query/QueryBuilders.java b/src/main/java/org/elasticsearch/index/query/QueryBuilders.java index 415544f3288..f383dc33ac6 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryBuilders.java +++ b/src/main/java/org/elasticsearch/index/query/QueryBuilders.java @@ -318,6 +318,16 @@ public abstract class QueryBuilders { return new SpanOrQueryBuilder(); } + /** Creates a new {@code span_within} builder. */ + public static SpanWithinQueryBuilder spanWithinQuery() { + return new SpanWithinQueryBuilder(); + } + + /** Creates a new {@code span_containing} builder. 
*/ + public static SpanContainingQueryBuilder spanContainingQuery() { + return new SpanContainingQueryBuilder(); + } + /** * Creates a {@link SpanQueryBuilder} which allows having a sub query * which implements {@link MultiTermQueryBuilder}. This is useful for diff --git a/src/main/java/org/elasticsearch/index/query/QueryFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/QueryFilterBuilder.java index cf5db0f0ac1..c4af2419b5c 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/QueryFilterBuilder.java @@ -32,8 +32,6 @@ public class QueryFilterBuilder extends BaseFilterBuilder { private final QueryBuilder queryBuilder; - private Boolean cache; - private String filterName; /** @@ -53,17 +51,9 @@ public class QueryFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public QueryFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { - if (filterName == null && cache == null) { + if (filterName == null) { builder.field(QueryFilterParser.NAME); queryBuilder.toXContent(builder, params); } else { @@ -73,9 +63,6 @@ public class QueryFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } builder.endObject(); } } diff --git a/src/main/java/org/elasticsearch/index/query/QueryFilterParser.java b/src/main/java/org/elasticsearch/index/query/QueryFilterParser.java index aaa5a9d1e99..bdc09dbee78 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/QueryFilterParser.java @@ -21,8 +21,8 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.search.Queries; import java.io.IOException; @@ -48,6 +48,6 @@ public class QueryFilterParser implements FilterParser { if (query == null) { return null; } - return Queries.wrap(query, parseContext); + return new QueryWrapperFilter(query); } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/query/QueryParseContext.java b/src/main/java/org/elasticsearch/index/query/QueryParseContext.java index fb55b7b818f..aaf247e90cc 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryParseContext.java +++ b/src/main/java/org/elasticsearch/index/query/QueryParseContext.java @@ -22,41 +22,31 @@ package org.elasticsearch.index.query; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queryparser.classic.MapperQueryParser; import org.apache.lucene.queryparser.classic.QueryParserSettings; -import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.search.similarities.Similarity; -import org.apache.lucene.util.Bits; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import 
org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.NoCacheFilter; -import org.elasticsearch.common.lucene.search.NoCacheQuery; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.common.lucene.search.ResolvableFilter; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.Index; import org.elasticsearch.index.analysis.AnalysisService; -import org.elasticsearch.index.cache.query.parser.QueryParserCache; import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.FieldMappers; import org.elasticsearch.index.mapper.Mapper; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperBuilders; -import org.elasticsearch.index.mapper.ContentPath; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.query.support.NestedScope; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.script.ScriptService; import org.elasticsearch.search.fetch.innerhits.InnerHitsContext; @@ -64,13 +54,21 @@ import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; -import java.util.*; +import java.util.Arrays; +import java.util.Collection; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; /** * */ public class QueryParseContext { + private static final ParseField CACHE = new ParseField("_cache").withAllDeprecated("Elasticsearch makes its own caching decisions"); + private static final ParseField CACHE_KEY = new ParseField("_cache_key").withAllDeprecated("Filters are always used as cache keys"); + private static ThreadLocal<String[]> typesContext = new ThreadLocal<>(); public static void setTypes(String[] types) { @@ -93,10 +91,6 @@ public class QueryParseContext { private final Index index; - private boolean propagateNoCache = false; - - private boolean requireCustomQueryWrappingFilter = false; - private final IndexQueryParserService indexQueryParser; private final Map<String, Filter> namedFilters = Maps.newHashMap(); @@ -107,8 +101,6 @@ public class QueryParseContext { private EnumSet<ParseField.Flag> parseFlags = ParseField.EMPTY_FLAGS; - private final boolean disableFilterCaching; - private boolean allowUnmappedFields; private boolean mapUnmappedFieldAsString; @@ -116,14 +108,8 @@ private NestedScope nestedScope; public QueryParseContext(Index index, IndexQueryParserService indexQueryParser) { - this(index, indexQueryParser, false); - } - - public QueryParseContext(Index index, IndexQueryParserService indexQueryParser, boolean disableFilterCaching) { this.index = index; this.indexQueryParser = indexQueryParser; - this.propagateNoCache = disableFilterCaching; - this.disableFilterCaching = disableFilterCaching; } public void parseFlags(EnumSet<ParseField.Flag> parseFlags) { @@ -140,8 +126,6 @@ this.lookup = null; this.parser = jp; this.namedFilters.clear(); - this.requireCustomQueryWrappingFilter = false; -
this.propagateNoCache = false; this.nestedScope = new NestedScope(); } @@ -182,32 +166,10 @@ public class QueryParseContext { return indexQueryParser.similarityService != null ? indexQueryParser.similarityService.similarity() : null; } - public QueryParserCache queryParserCache() { - return indexQueryParser.indexCache.queryParserCache(); - } - public String defaultField() { return indexQueryParser.defaultField(); } - public QueryCachingPolicy autoFilterCachePolicy() { - return indexQueryParser.autoFilterCachePolicy(); - } - - public QueryCachingPolicy parseFilterCachePolicy() throws IOException { - final String text = parser.textOrNull(); - if (text == null || text.equals("auto")) { - return autoFilterCachePolicy(); - } else if (parser.booleanValue()) { - // cache without conditions on how many times the filter has been - // used or what the produced DocIdSet looks like, but ONLY on large - // segments to not pollute the cache - return QueryCachingPolicy.CacheOnLargeSegments.DEFAULT; - } else { - return null; - } - } - public boolean queryStringLenient() { return indexQueryParser.queryStringLenient(); } @@ -221,38 +183,6 @@ public class QueryParseContext { return indexQueryParser.bitsetFilterCache.getBitDocIdSetFilter(filter); } - public Filter cacheFilter(Filter filter, final @Nullable HashedBytesRef cacheKey, final QueryCachingPolicy cachePolicy) { - if (filter == null) { - return null; - } - if (this.disableFilterCaching || this.propagateNoCache || filter instanceof NoCacheFilter) { - return filter; - } - if (filter instanceof ResolvableFilter) { - final ResolvableFilter resolvableFilter = (ResolvableFilter) filter; - // We need to wrap it another filter, because this method is invoked at query parse time, which - // may not be during search execution time. 
(for example index alias filter and percolator) - return new Filter() { - @Override - public DocIdSet getDocIdSet(LeafReaderContext atomicReaderContext, Bits bits) throws IOException { - Filter filter = resolvableFilter.resolve(); - if (filter == null) { - return null; - } - filter = indexQueryParser.indexCache.filter().cache(filter, cacheKey, cachePolicy); - return filter.getDocIdSet(atomicReaderContext, bits); - } - - @Override - public String toString(String field) { - return "AnonymousResolvableFilter"; // TODO: not sure what is going on here - } - }; - } else { - return indexQueryParser.indexCache.filter().cache(filter, cacheKey, cachePolicy); - } - } - public <IFD extends IndexFieldData<?>> IFD getForField(FieldMapper<?> mapper) { return indexQueryParser.fieldDataService.getForField(mapper); } @@ -262,7 +192,7 @@ } public void addNamedQuery(String name, Query query) { - namedFilters.put(name, Queries.wrap(query, this)); + namedFilters.put(name, new QueryWrapperFilter(query)); } public ImmutableMap<String, Filter> copyNamedFilters() { @@ -292,39 +222,29 @@ if (parser.currentToken() != XContentParser.Token.START_OBJECT) { token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new QueryParsingException(index, "[_na] query malformed, must start with start_object"); + throw new QueryParsingException(this, "[_na] query malformed, must start with start_object"); } } token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(index, "[_na] query malformed, no field after start_object"); + throw new QueryParsingException(this, "[_na] query malformed, no field after start_object"); } String queryName = parser.currentName(); // move to the next START_OBJECT token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT && token != XContentParser.Token.START_ARRAY) { - throw new QueryParsingException(index, "[_na] query malformed, no field after start_object"); + throw new QueryParsingException(this, "[_na] query malformed, no field after start_object"); } QueryParser queryParser = indexQueryParser.queryParser(queryName); if (queryParser == null) { - throw new QueryParsingException(index, "No query registered for [" + queryName + "]"); + throw new QueryParsingException(this, "No query registered for [" + queryName + "]"); } Query result = queryParser.parse(this); if (parser.currentToken() == XContentParser.Token.END_OBJECT || parser.currentToken() == XContentParser.Token.END_ARRAY) { // if we are at END_OBJECT, move to the next one... parser.nextToken(); } - if (result instanceof NoCacheQuery) { - propagateNoCache = true; - } - if (CustomQueryWrappingFilter.shouldUseCustomQueryWrappingFilter(result)) { - requireCustomQueryWrappingFilter = true; - // If later on, either directly or indirectly this query gets wrapped in a query filter it must never - // get cached even if a filter higher up the chain is configured to do this. This will happen, because - // the result filter will be instance of NoCacheFilter (CustomQueryWrappingFilter) which will in - // #executeFilterParser() set propagateNoCache to true.
-        }
         return result;
     }
@@ -335,7 +255,7 @@ public class QueryParseContext {
         if (parser.currentToken() != XContentParser.Token.START_OBJECT) {
             token = parser.nextToken();
             if (token != XContentParser.Token.START_OBJECT) {
-                throw new QueryParsingException(index, "[_na] filter malformed, must start with start_object");
+                throw new QueryParsingException(this, "[_na] filter malformed, must start with start_object");
             }
         }
         token = parser.nextToken();
@@ -344,20 +264,20 @@
             if (token == XContentParser.Token.END_OBJECT || token == XContentParser.Token.VALUE_NULL) {
                 return null;
             }
-            throw new QueryParsingException(index, "[_na] filter malformed, no field after start_object");
+            throw new QueryParsingException(this, "[_na] filter malformed, no field after start_object");
         }
         String filterName = parser.currentName();
         // move to the next START_OBJECT or START_ARRAY
         token = parser.nextToken();
         if (token != XContentParser.Token.START_OBJECT && token != XContentParser.Token.START_ARRAY) {
-            throw new QueryParsingException(index, "[_na] filter malformed, no field after start_object");
+            throw new QueryParsingException(this, "[_na] filter malformed, no field after start_object");
         }
         FilterParser filterParser = indexQueryParser.filterParser(filterName);
         if (filterParser == null) {
-            throw new QueryParsingException(index, "No filter registered for [" + filterName + "]");
+            throw new QueryParsingException(this, "No filter registered for [" + filterName + "]");
         }
-        Filter result = executeFilterParser(filterParser);
+        Filter result = filterParser.parse(this);
         if (parser.currentToken() == XContentParser.Token.END_OBJECT || parser.currentToken() == XContentParser.Token.END_ARRAY) {
             // if we are at END_OBJECT, move to the next one...
             parser.nextToken();
@@ -368,20 +288,9 @@ public class QueryParseContext {
     public Filter parseInnerFilter(String filterName) throws IOException, QueryParsingException {
         FilterParser filterParser = indexQueryParser.filterParser(filterName);
         if (filterParser == null) {
-            throw new QueryParsingException(index, "No filter registered for [" + filterName + "]");
+            throw new QueryParsingException(this, "No filter registered for [" + filterName + "]");
         }
-        return executeFilterParser(filterParser);
-    }
-
-    private Filter executeFilterParser(FilterParser filterParser) throws IOException {
-        final boolean propagateNoCache = this.propagateNoCache; // first safe the state that we need to restore
-        this.propagateNoCache = false; // parse the subfilter with caching, that's fine
-        Filter result = filterParser.parse(this);
-        // now make sure we set propagateNoCache to true if it is true already or if the result is
-        // an instance of NoCacheFilter or if we used to be true! all filters above will
-        // be not cached ie. wrappers of this filter!
- this.propagateNoCache |= (result instanceof NoCacheFilter) || propagateNoCache; - return result; + return filterParser.parse(this); } public FieldMapper fieldMapper(String name) { @@ -432,7 +341,8 @@ public class QueryParseContext { } else { Version indexCreatedVersion = indexQueryParser.getIndexCreatedVersion(); if (fieldMapping == null && indexCreatedVersion.onOrAfter(Version.V_1_4_0_Beta1)) { - throw new QueryParsingException(index, "Strict field resolution and no field mapping can be found for the field with name [" + name + "]"); + throw new QueryParsingException(this, "Strict field resolution and no field mapping can be found for the field with name [" + + name + "]"); } else { return fieldMapping; } @@ -474,11 +384,14 @@ public class QueryParseContext { return System.currentTimeMillis(); } - public boolean requireCustomQueryWrappingFilter() { - return requireCustomQueryWrappingFilter; - } - public NestedScope nestedScope() { return nestedScope; } + + /** + * Return whether the setting is deprecated. + */ + public boolean isDeprecatedSetting(String setting) { + return CACHE.match(setting) || CACHE_KEY.match(setting); + } } diff --git a/src/main/java/org/elasticsearch/index/query/QueryParserUtils.java b/src/main/java/org/elasticsearch/index/query/QueryParserUtils.java deleted file mode 100644 index 558722f1daf..00000000000 --- a/src/main/java/org/elasticsearch/index/query/QueryParserUtils.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.query; - -import org.elasticsearch.action.deletebyquery.TransportShardDeleteByQueryAction; -import org.elasticsearch.search.internal.SearchContext; - -/** - */ -public final class QueryParserUtils { - - private QueryParserUtils() { - } - - /** - * Ensures that the query parsing wasn't invoked via the delete by query api. - */ - public static void ensureNotDeleteByQuery(String name, QueryParseContext parseContext) { - SearchContext context = SearchContext.current(); - if (context == null) { - // We can't do the api check, because there is no search context. - // Because the delete by query shard transport action sets the search context this isn't an issue. 
- return; - } - - if (TransportShardDeleteByQueryAction.DELETE_BY_QUERY_API.equals(context.source())) { - throw new QueryParsingException(parseContext.index(), "[" + name + "] query and filter unsupported in delete_by_query api"); - } - } - -} diff --git a/src/main/java/org/elasticsearch/index/query/QueryParsingException.java b/src/main/java/org/elasticsearch/index/query/QueryParsingException.java index 8c3a0fbccbe..b9b0381e90e 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryParsingException.java +++ b/src/main/java/org/elasticsearch/index/query/QueryParsingException.java @@ -19,25 +19,81 @@ package org.elasticsearch.index.query; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentLocation; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexException; import org.elasticsearch.rest.RestStatus; +import java.io.IOException; + /** * */ public class QueryParsingException extends IndexException { - public QueryParsingException(Index index, String msg) { - super(index, msg); + static final int UNKNOWN_POSITION = -1; + private int lineNumber = UNKNOWN_POSITION; + private int columnNumber = UNKNOWN_POSITION; + + public QueryParsingException(QueryParseContext parseContext, String msg) { + this(parseContext, msg, null); } - public QueryParsingException(Index index, String msg, Throwable cause) { + public QueryParsingException(QueryParseContext parseContext, String msg, Throwable cause) { + super(parseContext.index(), msg, cause); + + XContentParser parser = parseContext.parser(); + if (parser != null) { + XContentLocation location = parser.getTokenLocation(); + if (location != null) { + lineNumber = location.lineNumber; + columnNumber = location.columnNumber; + } + } + } + + /** + * This constructor is provided for use in unit tests where a + * {@link QueryParseContext} may not be available + */ + QueryParsingException(Index index, int line, int col, String msg, Throwable cause) { super(index, msg, cause); + this.lineNumber = line; + this.columnNumber = col; + } + + /** + * Line number of the location of the error + * + * @return the line number or -1 if unknown + */ + public int getLineNumber() { + return lineNumber; + } + + /** + * Column number of the location of the error + * + * @return the column number or -1 if unknown + */ + public int getColumnNumber() { + return columnNumber; } @Override public RestStatus status() { return RestStatus.BAD_REQUEST; } + + @Override + protected void innerToXContent(XContentBuilder builder, Params params) throws IOException { + if (lineNumber != UNKNOWN_POSITION) { + builder.field("line", lineNumber); + builder.field("col", columnNumber); + } + super.innerToXContent(builder, params); + } + } diff --git a/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java b/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java index ded43bc50e8..402080789f4 100644 --- a/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/QueryStringQueryParser.java @@ -126,7 +126,8 @@ public class QueryStringQueryParser implements QueryParser { } } } else { - throw new QueryParsingException(parseContext.index(), "[query_string] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[query_string] query does not support [" + currentFieldName + + "]"); } } else if (token.isValue()) { if 
("query".equals(currentFieldName)) { @@ -140,18 +141,19 @@ public class QueryStringQueryParser implements QueryParser { } else if ("and".equalsIgnoreCase(op)) { qpSettings.defaultOperator(org.apache.lucene.queryparser.classic.QueryParser.Operator.AND); } else { - throw new QueryParsingException(parseContext.index(), "Query default operator [" + op + "] is not allowed"); + throw new QueryParsingException(parseContext, "Query default operator [" + op + "] is not allowed"); } } else if ("analyzer".equals(currentFieldName)) { NamedAnalyzer analyzer = parseContext.analysisService().analyzer(parser.text()); if (analyzer == null) { - throw new QueryParsingException(parseContext.index(), "[query_string] analyzer [" + parser.text() + "] not found"); + throw new QueryParsingException(parseContext, "[query_string] analyzer [" + parser.text() + "] not found"); } qpSettings.forcedAnalyzer(analyzer); } else if ("quote_analyzer".equals(currentFieldName) || "quoteAnalyzer".equals(currentFieldName)) { NamedAnalyzer analyzer = parseContext.analysisService().analyzer(parser.text()); if (analyzer == null) { - throw new QueryParsingException(parseContext.index(), "[query_string] quote_analyzer [" + parser.text() + "] not found"); + throw new QueryParsingException(parseContext, "[query_string] quote_analyzer [" + parser.text() + + "] not found"); } qpSettings.forcedQuoteAnalyzer(analyzer); } else if ("allow_leading_wildcard".equals(currentFieldName) || "allowLeadingWildcard".equals(currentFieldName)) { @@ -199,17 +201,19 @@ public class QueryStringQueryParser implements QueryParser { try { qpSettings.timeZone(DateTimeZone.forID(parser.text())); } catch (IllegalArgumentException e) { - throw new QueryParsingException(parseContext.index(), "[query_string] time_zone [" + parser.text() + "] is unknown"); + throw new QueryParsingException(parseContext, + "[query_string] time_zone [" + parser.text() + "] is unknown"); } } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[query_string] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[query_string] query does not support [" + currentFieldName + + "]"); } } } if (qpSettings.queryString() == null) { - throw new QueryParsingException(parseContext.index(), "query_string must be provided with a [query]"); + throw new QueryParsingException(parseContext, "query_string must be provided with a [query]"); } qpSettings.defaultAnalyzer(parseContext.mapperService().searchAnalyzer()); qpSettings.defaultQuoteAnalyzer(parseContext.mapperService().searchQuoteAnalyzer()); @@ -219,18 +223,11 @@ public class QueryStringQueryParser implements QueryParser { } qpSettings.queryTypes(parseContext.queryTypes()); - Query query = parseContext.queryParserCache().get(qpSettings); - if (query != null) { - if (queryName != null) { - parseContext.addNamedQuery(queryName, query); - } - return query; - } MapperQueryParser queryParser = parseContext.queryParser(qpSettings); try { - query = queryParser.parse(qpSettings.queryString()); + Query query = queryParser.parse(qpSettings.queryString()); if (query == null) { return null; } @@ -241,13 +238,12 @@ public class QueryStringQueryParser implements QueryParser { if (query instanceof BooleanQuery) { Queries.applyMinimumShouldMatch((BooleanQuery) query, qpSettings.minimumShouldMatch()); } - parseContext.queryParserCache().put(qpSettings, query); if (queryName != null) { parseContext.addNamedQuery(queryName, query); } 
            return query;
        } catch (org.apache.lucene.queryparser.classic.ParseException e) {
-            throw new QueryParsingException(parseContext.index(), "Failed to parse query [" + qpSettings.queryString() + "]", e);
+            throw new QueryParsingException(parseContext, "Failed to parse query [" + qpSettings.queryString() + "]", e);
        }
    }
}
diff --git a/src/main/java/org/elasticsearch/index/query/RangeFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/RangeFilterBuilder.java
index 80149821438..42753179b07 100644
--- a/src/main/java/org/elasticsearch/index/query/RangeFilterBuilder.java
+++ b/src/main/java/org/elasticsearch/index/query/RangeFilterBuilder.java
@@ -41,9 +41,6 @@ public class RangeFilterBuilder extends BaseFilterBuilder {
 
     private boolean includeUpper = true;
 
-    private Boolean cache;
-    private String cacheKey;
-
     private String filterName;
 
     private String execution;
@@ -341,19 +338,6 @@ public class RangeFilterBuilder extends BaseFilterBuilder {
         return this;
     }
 
-    /**
-     * Should the filter be cached or not. Defaults to true.
-     */
-    public RangeFilterBuilder cache(boolean cache) {
-        this.cache = cache;
-        return this;
-    }
-
-    public RangeFilterBuilder cacheKey(String cacheKey) {
-        this.cacheKey = cacheKey;
-        return this;
-    }
-
     /**
      * Sets the execution mode that controls how the range filter is executed. Valid values are: "index" and "fielddata".
      *
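Taken together with the new isDeprecatedSetting hook on QueryParseContext (which matches exactly "_cache" and "_cache_key"), these builder hunks complete the picture: the Java API loses its caching knobs, while requests that still send the old fields are parsed and the fields silently skipped; caching decisions are now made internally. A minimal before/after sketch, assuming the 1.x-era builder API (the field name "age" and the values are illustrative):

    // Before this change (1.x): caching hints were part of the builder API.
    // new RangeFilterBuilder("age").gte(21).lt(65).cache(true).cacheKey("adults");

    // After this change: cache()/cacheKey() are gone and such calls must be
    // removed when upgrading. Named filters via filterName()/_name are unaffected.
    RangeFilterBuilder adults = new RangeFilterBuilder("age").gte(21).lt(65).filterName("adults");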
    @@ -397,12 +381,6 @@ public class RangeFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } if (execution != null) { builder.field("execution", execution); } diff --git a/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java b/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java index 300ed66e6d8..83650aecd5d 100644 --- a/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/RangeFilterParser.java @@ -20,14 +20,12 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermRangeQuery; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.joda.DateMathParser; import org.elasticsearch.common.joda.Joda; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; @@ -57,8 +55,6 @@ public class RangeFilterParser implements FilterParser { public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String fieldName = null; Object from = null; Object to = null; @@ -74,6 +70,8 @@ public class RangeFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { fieldName = currentFieldName; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -105,27 +103,23 @@ public class RangeFilterParser implements FilterParser { } else if ("format".equals(currentFieldName)) { forcedDateParser = new DateMathParser(Joda.forPattern(parser.text()), DateFieldMapper.Defaults.TIME_UNIT); } else { - throw new QueryParsingException(parseContext.index(), "[range] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[range] filter does not support [" + currentFieldName + "]"); } } } } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else if ("execution".equals(currentFieldName)) { execution = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[range] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[range] filter does not support [" + currentFieldName + "]"); } } } if (fieldName == null) { - throw new QueryParsingException(parseContext.index(), "[range] filter no field specified for range filter"); + throw new 
QueryParsingException(parseContext, "[range] filter no field specified for range filter"); } Filter filter = null; @@ -136,44 +130,47 @@ public class RangeFilterParser implements FilterParser { FieldMapper mapper = smartNameFieldMappers.mapper(); if (mapper instanceof DateFieldMapper) { if ((from instanceof Number || to instanceof Number) && timeZone != null) { - throw new QueryParsingException(parseContext.index(), "[range] time_zone when using ms since epoch format as it's UTC based can not be applied to [" + fieldName + "]"); + throw new QueryParsingException(parseContext, + "[range] time_zone when using ms since epoch format as it's UTC based can not be applied to [" + + fieldName + "]"); } filter = ((DateFieldMapper) mapper).rangeFilter(from, to, includeLower, includeUpper, timeZone, forcedDateParser, parseContext); } else { if (timeZone != null) { - throw new QueryParsingException(parseContext.index(), "[range] time_zone can not be applied to non date field [" + fieldName + "]"); + throw new QueryParsingException(parseContext, "[range] time_zone can not be applied to non date field [" + + fieldName + "]"); } filter = mapper.rangeFilter(from, to, includeLower, includeUpper, parseContext); } } else if ("fielddata".equals(execution)) { FieldMapper mapper = smartNameFieldMappers.mapper(); if (!(mapper instanceof NumberFieldMapper)) { - throw new QueryParsingException(parseContext.index(), "[range] filter field [" + fieldName + "] is not a numeric type"); + throw new QueryParsingException(parseContext, "[range] filter field [" + fieldName + "] is not a numeric type"); } if (mapper instanceof DateFieldMapper) { if ((from instanceof Number || to instanceof Number) && timeZone != null) { - throw new QueryParsingException(parseContext.index(), "[range] time_zone when using ms since epoch format as it's UTC based can not be applied to [" + fieldName + "]"); + throw new QueryParsingException(parseContext, + "[range] time_zone when using ms since epoch format as it's UTC based can not be applied to [" + + fieldName + "]"); } filter = ((DateFieldMapper) mapper).rangeFilter(parseContext, from, to, includeLower, includeUpper, timeZone, forcedDateParser, parseContext); } else { if (timeZone != null) { - throw new QueryParsingException(parseContext.index(), "[range] time_zone can not be applied to non date field [" + fieldName + "]"); + throw new QueryParsingException(parseContext, "[range] time_zone can not be applied to non date field [" + + fieldName + "]"); } filter = ((NumberFieldMapper) mapper).rangeFilter(parseContext, from, to, includeLower, includeUpper, parseContext); } } else { - throw new QueryParsingException(parseContext.index(), "[range] filter doesn't support [" + execution + "] execution"); + throw new QueryParsingException(parseContext, "[range] filter doesn't support [" + execution + "] execution"); } } } if (filter == null) { - filter = Queries.wrap(new TermRangeQuery(fieldName, BytesRefs.toBytesRef(from), BytesRefs.toBytesRef(to), includeLower, includeUpper)); + filter = new QueryWrapperFilter(new TermRangeQuery(fieldName, BytesRefs.toBytesRef(from), BytesRefs.toBytesRef(to), includeLower, includeUpper)); } - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, filter); diff --git a/src/main/java/org/elasticsearch/index/query/RangeQueryParser.java b/src/main/java/org/elasticsearch/index/query/RangeQueryParser.java index cfc600832ec..354da1df704 100644 --- 
a/src/main/java/org/elasticsearch/index/query/RangeQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/RangeQueryParser.java @@ -55,12 +55,12 @@ public class RangeQueryParser implements QueryParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[range] query malformed, no field to indicate field name"); + throw new QueryParsingException(parseContext, "[range] query malformed, no field to indicate field name"); } String fieldName = parser.currentName(); token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new QueryParsingException(parseContext.index(), "[range] query malformed, after field missing start object"); + throw new QueryParsingException(parseContext, "[range] query malformed, after field missing start object"); } Object from = null; @@ -106,7 +106,7 @@ public class RangeQueryParser implements QueryParser { } else if ("format".equals(currentFieldName)) { forcedDateParser = new DateMathParser(Joda.forPattern(parser.text()), DateFieldMapper.Defaults.TIME_UNIT); } else { - throw new QueryParsingException(parseContext.index(), "[range] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[range] query does not support [" + currentFieldName + "]"); } } } @@ -114,7 +114,7 @@ public class RangeQueryParser implements QueryParser { // move to the next end object, to close the field name token = parser.nextToken(); if (token != XContentParser.Token.END_OBJECT) { - throw new QueryParsingException(parseContext.index(), "[range] query malformed, does not end with an object"); + throw new QueryParsingException(parseContext, "[range] query malformed, does not end with an object"); } Query query = null; @@ -124,12 +124,15 @@ public class RangeQueryParser implements QueryParser { FieldMapper mapper = smartNameFieldMappers.mapper(); if (mapper instanceof DateFieldMapper) { if ((from instanceof Number || to instanceof Number) && timeZone != null) { - throw new QueryParsingException(parseContext.index(), "[range] time_zone when using ms since epoch format as it's UTC based can not be applied to [" + fieldName + "]"); + throw new QueryParsingException(parseContext, + "[range] time_zone when using ms since epoch format as it's UTC based can not be applied to [" + fieldName + + "]"); } query = ((DateFieldMapper) mapper).rangeQuery(from, to, includeLower, includeUpper, timeZone, forcedDateParser, parseContext); } else { if (timeZone != null) { - throw new QueryParsingException(parseContext.index(), "[range] time_zone can not be applied to non date field [" + fieldName + "]"); + throw new QueryParsingException(parseContext, "[range] time_zone can not be applied to non date field [" + + fieldName + "]"); } //LUCENE 4 UPGRADE Mapper#rangeQuery should use bytesref as well? 
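// Context for the time_zone checks repeated above and below: time_zone is only
// honoured when the field is mapped as a date and the bounds are date strings;
// numeric bounds are interpreted as UTC milliseconds-since-epoch, so combining
// them with a time_zone is rejected. An accepted request would look roughly
// like this (field name and values illustrative):
//   {"range": {"timestamp": {"gte": "2015-01-01 00:00:00", "lt": "now",
//              "time_zone": "+01:00", "format": "yyyy-MM-dd HH:mm:ss"}}}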
query = mapper.rangeQuery(from, to, includeLower, includeUpper, parseContext); diff --git a/src/main/java/org/elasticsearch/index/query/RegexpFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/RegexpFilterBuilder.java index f199f83b5a5..f730c084be3 100644 --- a/src/main/java/org/elasticsearch/index/query/RegexpFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/RegexpFilterBuilder.java @@ -38,8 +38,6 @@ public class RegexpFilterBuilder extends BaseFilterBuilder { private int maxDeterminizedStates = Operations.DEFAULT_MAX_DETERMINIZED_STATES; private boolean maxDetermizedStatesSet; - private Boolean cache; - private String cacheKey; private String filterName; /** @@ -87,19 +85,6 @@ public class RegexpFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public RegexpFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public RegexpFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override public void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(RegexpFilterParser.NAME); @@ -118,12 +103,6 @@ public class RegexpFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } } diff --git a/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java b/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java index 76db069af17..9612812ffe9 100644 --- a/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/RegexpFilterParser.java @@ -21,13 +21,11 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.Term; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.RegexpQuery; import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MapperService; @@ -53,8 +51,6 @@ public class RegexpFilterParser implements FilterParser { public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String fieldName = null; String secondaryFieldName = null; Object value = null; @@ -68,6 +64,8 @@ public class RegexpFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { fieldName = currentFieldName; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { @@ -84,17 +82,13 @@ public class RegexpFilterParser implements FilterParser { } else if ("flags_value".equals(currentFieldName)) { flagsValue = parser.intValue(); } else { 
- throw new QueryParsingException(parseContext.index(), "[regexp] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[regexp] filter does not support [" + currentFieldName + "]"); } } } } else { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { secondaryFieldName = currentFieldName; secondaryValue = parser.objectBytes(); @@ -108,7 +102,7 @@ public class RegexpFilterParser implements FilterParser { } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for regexp filter"); + throw new QueryParsingException(parseContext, "No value specified for regexp filter"); } Filter filter = null; @@ -118,11 +112,7 @@ public class RegexpFilterParser implements FilterParser { filter = smartNameFieldMappers.mapper().regexpFilter(value, flagsValue, maxDeterminizedStates, parseContext); } if (filter == null) { - filter = Queries.wrap(new RegexpQuery(new Term(fieldName, BytesRefs.toBytesRef(value)), flagsValue, maxDeterminizedStates)); - } - - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); + filter = new QueryWrapperFilter(new RegexpQuery(new Term(fieldName, BytesRefs.toBytesRef(value)), flagsValue, maxDeterminizedStates)); } if (filterName != null) { diff --git a/src/main/java/org/elasticsearch/index/query/RegexpFlag.java b/src/main/java/org/elasticsearch/index/query/RegexpFlag.java index 454d0264b67..d2a3eafea56 100644 --- a/src/main/java/org/elasticsearch/index/query/RegexpFlag.java +++ b/src/main/java/org/elasticsearch/index/query/RegexpFlag.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.query; import java.util.Locale; import org.apache.lucene.util.automaton.RegExp; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Strings; /** @@ -128,7 +127,7 @@ public enum RegexpFlag { } magic |= flag.value(); } catch (IllegalArgumentException iae) { - throw new ElasticsearchIllegalArgumentException("Unknown regexp flag [" + s + "]"); + throw new IllegalArgumentException("Unknown regexp flag [" + s + "]"); } } return magic; diff --git a/src/main/java/org/elasticsearch/index/query/RegexpQueryParser.java b/src/main/java/org/elasticsearch/index/query/RegexpQueryParser.java index 41d53316a57..a1ec2996332 100644 --- a/src/main/java/org/elasticsearch/index/query/RegexpQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/RegexpQueryParser.java @@ -55,7 +55,7 @@ public class RegexpQueryParser implements QueryParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[regexp] query malformed, no field"); + throw new QueryParsingException(parseContext, "[regexp] query malformed, no field"); } String fieldName = parser.currentName(); String rewriteMethod = null; @@ -92,7 +92,7 @@ public class RegexpQueryParser implements QueryParser { queryName = parser.text(); } } else { - throw new QueryParsingException(parseContext.index(), "[regexp] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[regexp] query does not support [" + currentFieldName + "]"); } } parser.nextToken(); @@ -102,7 +102,7 @@ public class 
RegexpQueryParser implements QueryParser { } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for regexp query"); + throw new QueryParsingException(parseContext, "No value specified for regexp query"); } MultiTermQuery.RewriteMethod method = QueryParsers.parseRewriteMethod(rewriteMethod, null); diff --git a/src/main/java/org/elasticsearch/index/query/ScriptFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/ScriptFilterBuilder.java index 75ffa386198..89f0fe7f033 100644 --- a/src/main/java/org/elasticsearch/index/query/ScriptFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/ScriptFilterBuilder.java @@ -37,9 +37,6 @@ public class ScriptFilterBuilder extends BaseFilterBuilder { private String lang; - private Boolean cache; - private String cacheKey; - private String filterName; public ScriptFilterBuilder(String script) { @@ -79,19 +76,6 @@ public class ScriptFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public ScriptFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public ScriptFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(ScriptFilterParser.NAME); @@ -105,12 +89,6 @@ public class ScriptFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java b/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java index 8ada496be08..7e4b59a5748 100644 --- a/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/ScriptFilterParser.java @@ -24,14 +24,12 @@ import org.apache.lucene.search.BitsFilteredDocIdSet; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocValuesDocIdSet; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.util.Bits; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.LeafSearchScript; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptParameterParser; import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue; @@ -41,6 +39,7 @@ import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; import java.util.Map; +import java.util.Objects; import static com.google.common.collect.Maps.newHashMap; @@ -67,8 +66,6 @@ public class ScriptFilterParser implements FilterParser { XContentParser.Token token; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; // also, when caching, since its isCacheable is false, will result in loading all bit set... 
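// The trailing comment on the deleted line above captures why caching was a
// poor fit here: a script filter is opaque to the caching layer (its
// isCacheable is false), so caching it meant materializing whole bit sets.
// After this change the script filter no longer opts into the filter cache,
// and any "_cache"/"_cache_key" still present in a request falls into the
// isDeprecatedSetting branch below and is ignored.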
String script = null; String scriptLang; @@ -81,21 +78,19 @@ public class ScriptFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { if ("params".equals(currentFieldName)) { params = parser.map(); } else { - throw new QueryParsingException(parseContext.index(), "[script] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[script] filter does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else if (!scriptParameterParser.token(currentFieldName, token, parser)){ - throw new QueryParsingException(parseContext.index(), "[script] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[script] filter does not support [" + currentFieldName + "]"); } } } @@ -108,16 +103,13 @@ public class ScriptFilterParser implements FilterParser { scriptLang = scriptParameterParser.lang(); if (script == null) { - throw new QueryParsingException(parseContext.index(), "script must be provided with a [script] filter"); + throw new QueryParsingException(parseContext, "script must be provided with a [script] filter"); } if (params == null) { params = newHashMap(); } Filter filter = new ScriptFilter(scriptLang, script, scriptType, params, parseContext.scriptService(), parseContext.lookup()); - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); - } if (filterName != null) { parseContext.addNamedFilter(filterName, filter); } @@ -135,7 +127,7 @@ public class ScriptFilterParser implements FilterParser { public ScriptFilter(String scriptLang, String script, ScriptService.ScriptType scriptType, Map params, ScriptService scriptService, SearchLookup searchLookup) { this.script = script; this.params = params; - this.searchScript = scriptService.search(searchLookup, scriptLang, script, scriptType, ScriptContext.Standard.SEARCH, newHashMap(params)); + this.searchScript = scriptService.search(searchLookup, new Script(scriptLang, script, scriptType, newHashMap(params)), ScriptContext.Standard.SEARCH); } @Override @@ -150,7 +142,7 @@ public class ScriptFilterParser implements FilterParser { @Override public boolean equals(Object o) { if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (super.equals(o) == false) return false; ScriptFilter that = (ScriptFilter) o; @@ -162,8 +154,9 @@ public class ScriptFilterParser implements FilterParser { @Override public int hashCode() { - int result = script != null ? script.hashCode() : 0; - result = 31 * result + (params != null ? 
params.hashCode() : 0); + int result = super.hashCode(); + result = 31 * result + Objects.hashCode(script); + result = 31 * result + Objects.hashCode(params); return result; } @@ -196,7 +189,7 @@ public class ScriptFilterParser implements FilterParser { if (val instanceof Number) { return ((Number) val).longValue() != 0; } - throw new ElasticsearchIllegalArgumentException("Can't handle type [" + val + "] in script filter"); + throw new IllegalArgumentException("Can't handle type [" + val + "] in script filter"); } @Override @@ -205,4 +198,4 @@ public class ScriptFilterParser implements FilterParser { } } } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/query/SimpleQueryStringFlag.java b/src/main/java/org/elasticsearch/index/query/SimpleQueryStringFlag.java index 54c6291951d..ce0ce88a9e4 100644 --- a/src/main/java/org/elasticsearch/index/query/SimpleQueryStringFlag.java +++ b/src/main/java/org/elasticsearch/index/query/SimpleQueryStringFlag.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Strings; import java.util.Locale; @@ -72,9 +71,9 @@ public enum SimpleQueryStringFlag { magic |= flag.value(); } } catch (IllegalArgumentException iae) { - throw new ElasticsearchIllegalArgumentException("Unknown " + SimpleQueryStringParser.NAME + " flag [" + s + "]"); + throw new IllegalArgumentException("Unknown " + SimpleQueryStringParser.NAME + " flag [" + s + "]"); } } return magic; } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/query/SimpleQueryStringParser.java b/src/main/java/org/elasticsearch/index/query/SimpleQueryStringParser.java index 43e64ce0280..446dbc95b57 100644 --- a/src/main/java/org/elasticsearch/index/query/SimpleQueryStringParser.java +++ b/src/main/java/org/elasticsearch/index/query/SimpleQueryStringParser.java @@ -139,8 +139,9 @@ public class SimpleQueryStringParser implements QueryParser { } } } else { - throw new QueryParsingException(parseContext.index(), - "[" + NAME + "] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, + "[" + NAME + "] query does not support [" + currentFieldName + + "]"); } } else if (token.isValue()) { if ("query".equals(currentFieldName)) { @@ -148,7 +149,7 @@ public class SimpleQueryStringParser implements QueryParser { } else if ("analyzer".equals(currentFieldName)) { analyzer = parseContext.analysisService().analyzer(parser.text()); if (analyzer == null) { - throw new QueryParsingException(parseContext.index(), "[" + NAME + "] analyzer [" + parser.text() + "] not found"); + throw new QueryParsingException(parseContext, "[" + NAME + "] analyzer [" + parser.text() + "] not found"); } } else if ("field".equals(currentFieldName)) { field = parser.text(); @@ -159,8 +160,7 @@ public class SimpleQueryStringParser implements QueryParser { } else if ("and".equalsIgnoreCase(op)) { defaultOperator = BooleanClause.Occur.MUST; } else { - throw new QueryParsingException(parseContext.index(), - "[" + NAME + "] default operator [" + op + "] is not allowed"); + throw new QueryParsingException(parseContext, "[" + NAME + "] default operator [" + op + "] is not allowed"); } } else if ("flags".equals(currentFieldName)) { if (parser.currentToken() != XContentParser.Token.VALUE_NUMBER) { @@ -188,14 +188,14 @@ public class SimpleQueryStringParser implements QueryParser { } else if ("minimum_should_match".equals(currentFieldName)) { 
minimumShouldMatch = parser.textOrNull(); } else { - throw new QueryParsingException(parseContext.index(), "[" + NAME + "] unsupported field [" + parser.currentName() + "]"); + throw new QueryParsingException(parseContext, "[" + NAME + "] unsupported field [" + parser.currentName() + "]"); } } } // Query text is required if (queryBody == null) { - throw new QueryParsingException(parseContext.index(), "[" + NAME + "] query text missing"); + throw new QueryParsingException(parseContext, "[" + NAME + "] query text missing"); } // Support specifying only a field instead of a map diff --git a/src/main/java/org/elasticsearch/index/query/SpanContainingQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/SpanContainingQueryBuilder.java new file mode 100644 index 00000000000..6fd2dee013a --- /dev/null +++ b/src/main/java/org/elasticsearch/index/query/SpanContainingQueryBuilder.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Builder for {@link SpanContainingQuery}. + */ +public class SpanContainingQueryBuilder extends BaseQueryBuilder implements SpanQueryBuilder, BoostableQueryBuilder { + + private SpanQueryBuilder big; + private SpanQueryBuilder little; + private float boost = -1; + private String queryName; + + /** + * Sets the little clause, it must be contained within {@code big} for a match. + */ + public SpanContainingQueryBuilder little(SpanQueryBuilder clause) { + this.little = clause; + return this; + } + + /** + * Sets the big clause, it must enclose {@code little} for a match. + */ + public SpanContainingQueryBuilder big(SpanQueryBuilder clause) { + this.big = clause; + return this; + } + + @Override + public SpanContainingQueryBuilder boost(float boost) { + this.boost = boost; + return this; + } + + /** + * Sets the query name for the filter that can be used when searching for matched_filters per hit. 
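+     * (in search responses the matching name is reported per hit under matched_queries)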
+ */ + public SpanContainingQueryBuilder queryName(String queryName) { + this.queryName = queryName; + return this; + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + if (big == null) { + throw new IllegalArgumentException("Must specify big clause when building a span_containing query"); + } + if (little == null) { + throw new IllegalArgumentException("Must specify little clause when building a span_containing query"); + } + builder.startObject(SpanContainingQueryParser.NAME); + + builder.field("big"); + big.toXContent(builder, params); + + builder.field("little"); + little.toXContent(builder, params); + + if (boost != -1) { + builder.field("boost", boost); + } + + if (queryName != null) { + builder.field("_name", queryName); + } + + builder.endObject(); + } +} diff --git a/src/main/java/org/elasticsearch/index/query/SpanContainingQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanContainingQueryParser.java new file mode 100644 index 00000000000..63e312bf384 --- /dev/null +++ b/src/main/java/org/elasticsearch/index/query/SpanContainingQueryParser.java @@ -0,0 +1,100 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.search.Query; +import org.apache.lucene.search.spans.SpanContainingQuery; +import org.apache.lucene.search.spans.SpanQuery; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +/** + * Parser for {@link SpanContainingQuery} + */ +public class SpanContainingQueryParser implements QueryParser { + + public static final String NAME = "span_containing"; + + @Inject + public SpanContainingQueryParser() { + } + + @Override + public String[] names() { + return new String[]{NAME, Strings.toCamelCase(NAME)}; + } + + @Override + public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException { + XContentParser parser = parseContext.parser(); + + float boost = 1.0f; + String queryName = null; + SpanQuery big = null; + SpanQuery little = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if ("big".equals(currentFieldName)) { + Query query = parseContext.parseInnerQuery(); + if (!(query instanceof SpanQuery)) { + throw new QueryParsingException(parseContext, "span_containing [big] must be of type span query"); + } + big = (SpanQuery) query; + } else if ("little".equals(currentFieldName)) { + Query query = parseContext.parseInnerQuery(); + if (!(query instanceof SpanQuery)) { + throw new QueryParsingException(parseContext, "span_containing [little] must be of type span query"); + } + little = (SpanQuery) query; + } else { + throw new QueryParsingException(parseContext, "[span_containing] query does not support [" + currentFieldName + "]"); + } + } else if ("boost".equals(currentFieldName)) { + boost = parser.floatValue(); + } else if ("_name".equals(currentFieldName)) { + queryName = parser.text(); + } else { + throw new QueryParsingException(parseContext, "[span_containing] query does not support [" + currentFieldName + "]"); + } + } + + if (big == null) { + throw new QueryParsingException(parseContext, "span_containing must include [big]"); + } + if (little == null) { + throw new QueryParsingException(parseContext, "span_containing must include [little]"); + } + + Query query = new SpanContainingQuery(big, little); + query.setBoost(boost); + if (queryName != null) { + parseContext.addNamedQuery(queryName, query); + } + return query; + } +} diff --git a/src/main/java/org/elasticsearch/index/query/SpanFirstQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanFirstQueryParser.java index ea8ff3d3923..5a302eb17d7 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanFirstQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/SpanFirstQueryParser.java @@ -63,11 +63,11 @@ public class SpanFirstQueryParser implements QueryParser { if ("match".equals(currentFieldName)) { Query query = parseContext.parseInnerQuery(); if (!(query instanceof SpanQuery)) { - throw new QueryParsingException(parseContext.index(), "spanFirst [match] must be of type span query"); + throw new QueryParsingException(parseContext, "spanFirst [match] must be of type span query"); } match = (SpanQuery) query; } else { - throw new QueryParsingException(parseContext.index(), "[span_first] query does not support [" + currentFieldName + 
"]"); + throw new QueryParsingException(parseContext, "[span_first] query does not support [" + currentFieldName + "]"); } } else { if ("boost".equals(currentFieldName)) { @@ -77,15 +77,15 @@ public class SpanFirstQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[span_first] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_first] query does not support [" + currentFieldName + "]"); } } } if (match == null) { - throw new QueryParsingException(parseContext.index(), "spanFirst must have [match] span query clause"); + throw new QueryParsingException(parseContext, "spanFirst must have [match] span query clause"); } if (end == -1) { - throw new QueryParsingException(parseContext.index(), "spanFirst must have [end] set for it"); + throw new QueryParsingException(parseContext, "spanFirst must have [end] set for it"); } SpanFirstQuery query = new SpanFirstQuery(match, end); diff --git a/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryParser.java index 7c9b2a67277..a44580a5176 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/SpanMultiTermQueryParser.java @@ -51,17 +51,17 @@ public class SpanMultiTermQueryParser implements QueryParser { Token token = parser.nextToken(); if (!MATCH_NAME.equals(parser.currentName()) || token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "spanMultiTerm must have [" + MATCH_NAME + "] multi term query clause"); + throw new QueryParsingException(parseContext, "spanMultiTerm must have [" + MATCH_NAME + "] multi term query clause"); } token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new QueryParsingException(parseContext.index(), "spanMultiTerm must have [" + MATCH_NAME + "] multi term query clause"); + throw new QueryParsingException(parseContext, "spanMultiTerm must have [" + MATCH_NAME + "] multi term query clause"); } Query subQuery = parseContext.parseInnerQuery(); if (!(subQuery instanceof MultiTermQuery)) { - throw new QueryParsingException(parseContext.index(), "spanMultiTerm [" + MATCH_NAME + "] must be of type multi term query"); + throw new QueryParsingException(parseContext, "spanMultiTerm [" + MATCH_NAME + "] must be of type multi term query"); } parser.nextToken(); diff --git a/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java index 286fab968eb..2e89d2ff93f 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -79,10 +78,10 @@ public class SpanNearQueryBuilder extends BaseQueryBuilder implements SpanQueryB @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { if (clauses.isEmpty()) { - throw new ElasticsearchIllegalArgumentException("Must have at least one clause when building a spanNear query"); + throw new IllegalArgumentException("Must have at least one clause when building 
a spanNear query"); } if (slop == null) { - throw new ElasticsearchIllegalArgumentException("Must set the slop when building a spanNear query"); + throw new IllegalArgumentException("Must set the slop when building a spanNear query"); } builder.startObject(SpanNearQueryParser.NAME); builder.startArray("clauses"); diff --git a/src/main/java/org/elasticsearch/index/query/SpanNearQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanNearQueryParser.java index 84283fce1a4..6ecf1b70bea 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanNearQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/SpanNearQueryParser.java @@ -69,12 +69,12 @@ public class SpanNearQueryParser implements QueryParser { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { Query query = parseContext.parseInnerQuery(); if (!(query instanceof SpanQuery)) { - throw new QueryParsingException(parseContext.index(), "spanNear [clauses] must be of type span query"); + throw new QueryParsingException(parseContext, "spanNear [clauses] must be of type span query"); } clauses.add((SpanQuery) query); } } else { - throw new QueryParsingException(parseContext.index(), "[span_near] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_near] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("in_order".equals(currentFieldName) || "inOrder".equals(currentFieldName)) { @@ -88,17 +88,17 @@ public class SpanNearQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[span_near] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_near] query does not support [" + currentFieldName + "]"); } } else { - throw new QueryParsingException(parseContext.index(), "[span_near] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_near] query does not support [" + currentFieldName + "]"); } } if (clauses.isEmpty()) { - throw new QueryParsingException(parseContext.index(), "span_near must include [clauses]"); + throw new QueryParsingException(parseContext, "span_near must include [clauses]"); } if (slop == null) { - throw new QueryParsingException(parseContext.index(), "span_near must include [slop]"); + throw new QueryParsingException(parseContext, "span_near must include [slop]"); } SpanNearQuery query = new SpanNearQuery(clauses.toArray(new SpanQuery[clauses.size()]), slop.intValue(), inOrder, collectPayloads); diff --git a/src/main/java/org/elasticsearch/index/query/SpanNotQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/SpanNotQueryBuilder.java index cb14dae11d2..2bcbab5487b 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanNotQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/SpanNotQueryBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -87,14 +86,14 @@ public class SpanNotQueryBuilder extends BaseQueryBuilder implements SpanQueryBu @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { if (include == null) { - throw new ElasticsearchIllegalArgumentException("Must specify include when using spanNot query"); + throw new 
IllegalArgumentException("Must specify include when using spanNot query"); } if (exclude == null) { - throw new ElasticsearchIllegalArgumentException("Must specify exclude when using spanNot query"); + throw new IllegalArgumentException("Must specify exclude when using spanNot query"); } if (dist != null && (pre != null || post != null)) { - throw new ElasticsearchIllegalArgumentException("spanNot can either use [dist] or [pre] & [post] (or none)"); + throw new IllegalArgumentException("spanNot can either use [dist] or [pre] & [post] (or none)"); } builder.startObject(SpanNotQueryParser.NAME); diff --git a/src/main/java/org/elasticsearch/index/query/SpanNotQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanNotQueryParser.java index afadf4c68ef..bcb62e7a224 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanNotQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/SpanNotQueryParser.java @@ -68,17 +68,17 @@ public class SpanNotQueryParser implements QueryParser { if ("include".equals(currentFieldName)) { Query query = parseContext.parseInnerQuery(); if (!(query instanceof SpanQuery)) { - throw new QueryParsingException(parseContext.index(), "spanNot [include] must be of type span query"); + throw new QueryParsingException(parseContext, "spanNot [include] must be of type span query"); } include = (SpanQuery) query; } else if ("exclude".equals(currentFieldName)) { Query query = parseContext.parseInnerQuery(); if (!(query instanceof SpanQuery)) { - throw new QueryParsingException(parseContext.index(), "spanNot [exclude] must be of type span query"); + throw new QueryParsingException(parseContext, "spanNot [exclude] must be of type span query"); } exclude = (SpanQuery) query; } else { - throw new QueryParsingException(parseContext.index(), "[span_not] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_not] query does not support [" + currentFieldName + "]"); } } else { if ("dist".equals(currentFieldName)) { @@ -92,18 +92,18 @@ public class SpanNotQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[span_not] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_not] query does not support [" + currentFieldName + "]"); } } } if (include == null) { - throw new QueryParsingException(parseContext.index(), "spanNot must have [include] span query clause"); + throw new QueryParsingException(parseContext, "spanNot must have [include] span query clause"); } if (exclude == null) { - throw new QueryParsingException(parseContext.index(), "spanNot must have [exclude] span query clause"); + throw new QueryParsingException(parseContext, "spanNot must have [exclude] span query clause"); } if (dist != null && (pre != null || post != null)) { - throw new QueryParsingException(parseContext.index(), "spanNot can either use [dist] or [pre] & [post] (or none)"); + throw new QueryParsingException(parseContext, "spanNot can either use [dist] or [pre] & [post] (or none)"); } // set appropriate defaults diff --git a/src/main/java/org/elasticsearch/index/query/SpanOrQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/SpanOrQueryBuilder.java index f780ed8b05d..b453809e837 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanOrQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/SpanOrQueryBuilder.java @@ -19,7 +19,6 @@ 
package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import java.io.IOException; @@ -58,7 +57,7 @@ public class SpanOrQueryBuilder extends BaseQueryBuilder implements SpanQueryBui @Override protected void doXContent(XContentBuilder builder, Params params) throws IOException { if (clauses.isEmpty()) { - throw new ElasticsearchIllegalArgumentException("Must have at least one clause when building a spanOr query"); + throw new IllegalArgumentException("Must have at least one clause when building a spanOr query"); } builder.startObject(SpanOrQueryParser.NAME); builder.startArray("clauses"); diff --git a/src/main/java/org/elasticsearch/index/query/SpanOrQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanOrQueryParser.java index a9d12f6d941..db58d4cca82 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanOrQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/SpanOrQueryParser.java @@ -66,12 +66,12 @@ public class SpanOrQueryParser implements QueryParser { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { Query query = parseContext.parseInnerQuery(); if (!(query instanceof SpanQuery)) { - throw new QueryParsingException(parseContext.index(), "spanOr [clauses] must be of type span query"); + throw new QueryParsingException(parseContext, "spanOr [clauses] must be of type span query"); } clauses.add((SpanQuery) query); } } else { - throw new QueryParsingException(parseContext.index(), "[span_or] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_or] query does not support [" + currentFieldName + "]"); } } else { if ("boost".equals(currentFieldName)) { @@ -79,12 +79,12 @@ public class SpanOrQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[span_or] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_or] query does not support [" + currentFieldName + "]"); } } } if (clauses.isEmpty()) { - throw new QueryParsingException(parseContext.index(), "spanOr must include [clauses]"); + throw new QueryParsingException(parseContext, "spanOr must include [clauses]"); } SpanOrQuery query = new SpanOrQuery(clauses.toArray(new SpanQuery[clauses.size()])); diff --git a/src/main/java/org/elasticsearch/index/query/SpanTermQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanTermQueryParser.java index 0203bb26051..535b626306a 100644 --- a/src/main/java/org/elasticsearch/index/query/SpanTermQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/SpanTermQueryParser.java @@ -77,7 +77,7 @@ public class SpanTermQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[span_term] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[span_term] query does not support [" + currentFieldName + "]"); } } } @@ -89,7 +89,7 @@ public class SpanTermQueryParser implements QueryParser { } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for term query"); + throw new QueryParsingException(parseContext, "No value specified for term query"); } BytesRef valueBytes = null; diff --git 
a/src/main/java/org/elasticsearch/index/query/SpanWithinQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/SpanWithinQueryBuilder.java new file mode 100644 index 00000000000..88e1538bff4 --- /dev/null +++ b/src/main/java/org/elasticsearch/index/query/SpanWithinQueryBuilder.java @@ -0,0 +1,92 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.elasticsearch.common.xcontent.XContentBuilder; + +import java.io.IOException; + +/** + * Builder for {@link SpanWithinQuery}. + */ +public class SpanWithinQueryBuilder extends BaseQueryBuilder implements SpanQueryBuilder, BoostableQueryBuilder { + + private SpanQueryBuilder big; + private SpanQueryBuilder little; + private float boost = -1; + private String queryName; + + /** + * Sets the little clause, it must be contained within {@code big} for a match. + */ + public SpanWithinQueryBuilder little(SpanQueryBuilder clause) { + this.little = clause; + return this; + } + + /** + * Sets the big clause, it must enclose {@code little} for a match. + */ + public SpanWithinQueryBuilder big(SpanQueryBuilder clause) { + this.big = clause; + return this; + } + + @Override + public SpanWithinQueryBuilder boost(float boost) { + this.boost = boost; + return this; + } + + /** + * Sets the query name for the filter that can be used when searching for matched_filters per hit. + */ + public SpanWithinQueryBuilder queryName(String queryName) { + this.queryName = queryName; + return this; + } + + @Override + protected void doXContent(XContentBuilder builder, Params params) throws IOException { + if (big == null) { + throw new IllegalArgumentException("Must specify big clause when building a span_within query"); + } + if (little == null) { + throw new IllegalArgumentException("Must specify little clause when building a span_within query"); + } + builder.startObject(SpanWithinQueryParser.NAME); + + builder.field("big"); + big.toXContent(builder, params); + + builder.field("little"); + little.toXContent(builder, params); + + if (boost != -1) { + builder.field("boost", boost); + } + + if (queryName != null) { + builder.field("_name", queryName); + } + + builder.endObject(); + } +} diff --git a/src/main/java/org/elasticsearch/index/query/SpanWithinQueryParser.java b/src/main/java/org/elasticsearch/index/query/SpanWithinQueryParser.java new file mode 100644 index 00000000000..9194cbd2d0e --- /dev/null +++ b/src/main/java/org/elasticsearch/index/query/SpanWithinQueryParser.java @@ -0,0 +1,100 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.search.Query; +import org.apache.lucene.search.spans.SpanQuery; +import org.apache.lucene.search.spans.SpanWithinQuery; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.xcontent.XContentParser; + +import java.io.IOException; + +/** + * Parser for {@link SpanWithinQuery} + */ +public class SpanWithinQueryParser implements QueryParser { + + public static final String NAME = "span_within"; + + @Inject + public SpanWithinQueryParser() { + } + + @Override + public String[] names() { + return new String[]{NAME, Strings.toCamelCase(NAME)}; + } + + @Override + public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException { + XContentParser parser = parseContext.parser(); + + float boost = 1.0f; + String queryName = null; + SpanQuery big = null; + SpanQuery little = null; + + String currentFieldName = null; + XContentParser.Token token; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + if ("big".equals(currentFieldName)) { + Query query = parseContext.parseInnerQuery(); + if (query instanceof SpanQuery == false) { + throw new QueryParsingException(parseContext, "span_within [big] must be of type span query"); + } + big = (SpanQuery) query; + } else if ("little".equals(currentFieldName)) { + Query query = parseContext.parseInnerQuery(); + if (query instanceof SpanQuery == false) { + throw new QueryParsingException(parseContext, "span_within [little] must be of type span query"); + } + little = (SpanQuery) query; + } else { + throw new QueryParsingException(parseContext, "[span_within] query does not support [" + currentFieldName + "]"); + } + } else if ("boost".equals(currentFieldName)) { + boost = parser.floatValue(); + } else if ("_name".equals(currentFieldName)) { + queryName = parser.text(); + } else { + throw new QueryParsingException(parseContext, "[span_within] query does not support [" + currentFieldName + "]"); + } + } + + if (big == null) { + throw new QueryParsingException(parseContext, "span_within must include [big]"); + } + if (little == null) { + throw new QueryParsingException(parseContext, "span_within must include [little]"); + } + + Query query = new SpanWithinQuery(big, little); + query.setBoost(boost); + if (queryName != null) { + parseContext.addNamedQuery(queryName, query); + } + return query; + } +} diff --git a/src/main/java/org/elasticsearch/index/query/TemplateQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/TemplateQueryBuilder.java index 176f97f4fff..ca35a42ae3b 100644 --- a/src/main/java/org/elasticsearch/index/query/TemplateQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/TemplateQueryBuilder.java 
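The two new files above add a span_within query: it matches spans from the little clause only where they are enclosed by a span from the big clause, and both clauses must themselves be span queries. A hedged usage sketch, assuming the span helpers that already live in this package (QueryBuilders.spanNearQuery and QueryBuilders.spanTermQuery):

    // Match "hostname" only where it occurs inside a tight phrase window.
    SpanWithinQueryBuilder within = new SpanWithinQueryBuilder()
            .big(QueryBuilders.spanNearQuery()
                    .clause(QueryBuilders.spanTermQuery("body", "hostname"))
                    .clause(QueryBuilders.spanTermQuery("body", "resolved"))
                    .slop(5)
                    .inOrder(true))
            .little(QueryBuilders.spanTermQuery("body", "hostname"));

Note that doXContent throws IllegalArgumentException when either clause is missing, and the parser mirrors that check with QueryParsingException for requests that arrive as JSON.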
@@ -18,7 +18,6 @@ */ package org.elasticsearch.index.query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.script.ScriptService; @@ -71,7 +70,7 @@ public class TemplateQueryBuilder extends BaseQueryBuilder { fieldname = TemplateQueryParser.QUERY; break; default: - throw new ElasticsearchIllegalArgumentException("Unknown template type " + templateType); + throw new IllegalArgumentException("Unknown template type " + templateType); } builder.field(fieldname, template); builder.field(TemplateQueryParser.PARAMS, vars); diff --git a/src/main/java/org/elasticsearch/index/query/TemplateQueryParser.java b/src/main/java/org/elasticsearch/index/query/TemplateQueryParser.java index 41bf930e4a8..32872f8f7a0 100644 --- a/src/main/java/org/elasticsearch/index/query/TemplateQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/TemplateQueryParser.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.script.ExecutableScript; import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.mustache.MustacheScriptEngineService; @@ -77,7 +78,7 @@ public class TemplateQueryParser implements QueryParser { public Query parse(QueryParseContext parseContext) throws IOException { XContentParser parser = parseContext.parser(); TemplateContext templateContext = parse(parser, PARAMS, parametersToTypes); - ExecutableScript executable = this.scriptService.executable(MustacheScriptEngineService.NAME, templateContext.template(), templateContext.scriptType(), ScriptContext.Standard.SEARCH, templateContext.params()); + ExecutableScript executable = this.scriptService.executable(new Script(MustacheScriptEngineService.NAME, templateContext.template(), templateContext.scriptType(), templateContext.params()), ScriptContext.Standard.SEARCH); BytesReference querySource = (BytesReference) executable.run(); diff --git a/src/main/java/org/elasticsearch/index/query/TermFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/TermFilterBuilder.java index 74349a00603..3ca5069127d 100644 --- a/src/main/java/org/elasticsearch/index/query/TermFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/TermFilterBuilder.java @@ -34,9 +34,6 @@ public class TermFilterBuilder extends BaseFilterBuilder { private final Object value; - private Boolean cache; - private String cacheKey; - private String filterName; /** @@ -108,19 +105,6 @@ public class TermFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to true. 
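One detail worth pulling out of the TemplateQueryParser hunk above: ScriptService.executable() now takes a single Script value object instead of a loose argument list. A sketch of the call under that assumption:

    // Before: positional arguments, easy to transpose.
    scriptService.executable(MustacheScriptEngineService.NAME, templateContext.template(),
            templateContext.scriptType(), ScriptContext.Standard.SEARCH, templateContext.params());

    // After: the Script object groups language, source, script type and params;
    // only the execution context stays a separate argument.
    scriptService.executable(
            new Script(MustacheScriptEngineService.NAME, templateContext.template(),
                    templateContext.scriptType(), templateContext.params()),
            ScriptContext.Standard.SEARCH);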
- */ - public TermFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public TermFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override public void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(TermFilterParser.NAME); @@ -128,12 +112,6 @@ public class TermFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/query/TermFilterParser.java b/src/main/java/org/elasticsearch/index/query/TermFilterParser.java index f03a8a43cae..0224a6384da 100644 --- a/src/main/java/org/elasticsearch/index/query/TermFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/TermFilterParser.java @@ -21,12 +21,10 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.Term; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.common.lucene.HashedBytesRef; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.MapperService; @@ -52,8 +50,6 @@ public class TermFilterParser implements FilterParser { public Filter parse(QueryParseContext parseContext) throws IOException, QueryParsingException { XContentParser parser = parseContext.parser(); - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); - HashedBytesRef cacheKey = null; String fieldName = null; Object value = null; @@ -63,6 +59,8 @@ public class TermFilterParser implements FilterParser { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_OBJECT) { // also support a format of "term" : {"field_name" : { ... 
}} fieldName = currentFieldName; @@ -76,22 +74,14 @@ public class TermFilterParser implements FilterParser { value = parser.objectBytes(); } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[term] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[term] filter does not support [" + currentFieldName + "]"); } } } } else if (token.isValue()) { if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { fieldName = currentFieldName; value = parser.objectBytes(); @@ -100,11 +90,11 @@ public class TermFilterParser implements FilterParser { } if (fieldName == null) { - throw new QueryParsingException(parseContext.index(), "No field specified for term filter"); + throw new QueryParsingException(parseContext, "No field specified for term filter"); } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for term filter"); + throw new QueryParsingException(parseContext, "No value specified for term filter"); } Filter filter = null; @@ -113,11 +103,7 @@ public class TermFilterParser implements FilterParser { filter = smartNameFieldMappers.mapper().termFilter(value, parseContext); } if (filter == null) { - filter = Queries.wrap(new TermQuery(new Term(fieldName, BytesRefs.toBytesRef(value)))); - } - - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); + filter = new QueryWrapperFilter(new TermQuery(new Term(fieldName, BytesRefs.toBytesRef(value)))); } if (filterName != null) { @@ -125,4 +111,4 @@ public class TermFilterParser implements FilterParser { } return filter; } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/query/TermQueryParser.java b/src/main/java/org/elasticsearch/index/query/TermQueryParser.java index 2c016973b6e..3a913fc21ad 100644 --- a/src/main/java/org/elasticsearch/index/query/TermQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/TermQueryParser.java @@ -51,7 +51,7 @@ public class TermQueryParser implements QueryParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[term] query malformed, no field"); + throw new QueryParsingException(parseContext, "[term] query malformed, no field"); } String fieldName = parser.currentName(); @@ -74,7 +74,7 @@ public class TermQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[term] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[term] query does not support [" + currentFieldName + "]"); } } } @@ -86,7 +86,7 @@ public class TermQueryParser implements QueryParser { } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for term query"); + throw new 
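/*
 * The term filter hunks above drop the _cache and _cache_key controls from
 * both the builder and the parser; with this change the shared query cache
 * decides what gets cached, so per-filter flags no longer do anything.
 * Requests that still send them are tolerated rather than rejected:
 * parseContext.isDeprecatedSetting(...) quietly skips those fields. A sketch
 * of the trimmed-down builder, using only the methods this diff keeps:
 *
 *     TermFilterBuilder userFilter = new TermFilterBuilder("user", "kimchy")
 *             .filterName("user_is_kimchy");   // cache(...)/cacheKey(...) are gone
 */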
QueryParsingException(parseContext, "No value specified for term query"); } Query query = null; diff --git a/src/main/java/org/elasticsearch/index/query/TermsFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/TermsFilterBuilder.java index a6331fb51a6..d753235e341 100644 --- a/src/main/java/org/elasticsearch/index/query/TermsFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/TermsFilterBuilder.java @@ -32,9 +32,6 @@ public class TermsFilterBuilder extends BaseFilterBuilder { private final Object values; - private Boolean cache; - private String cacheKey; - private String filterName; private String execution; @@ -134,19 +131,6 @@ public class TermsFilterBuilder extends BaseFilterBuilder { return this; } - /** - * Should the filter be cached or not. Defaults to false. - */ - public TermsFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public TermsFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override public void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(TermsFilterParser.NAME); @@ -159,12 +143,6 @@ public class TermsFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } diff --git a/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java b/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java index 3c5ecd15106..ce8a8122665 100644 --- a/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/TermsFilterParser.java @@ -23,14 +23,13 @@ import com.google.common.collect.Lists; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.Filter; -import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.BytesRefs; -import org.elasticsearch.common.lucene.HashedBytesRef; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -71,7 +70,6 @@ public class TermsFilterParser implements FilterParser { XContentParser parser = parseContext.parser(); MapperService.SmartNameFieldMappers smartNameFieldMappers; - QueryCachingPolicy cache = parseContext.autoFilterCachePolicy(); String filterName = null; String currentFieldName = null; @@ -81,23 +79,24 @@ public class TermsFilterParser implements FilterParser { String lookupPath = null; String lookupRouting = null; - HashedBytesRef cacheKey = null; XContentParser.Token token; List terms = Lists.newArrayList(); String fieldName = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); + } else if (parseContext.isDeprecatedSetting(currentFieldName)) { + // skip } else if (token == XContentParser.Token.START_ARRAY) { if (fieldName != null) { - throw new QueryParsingException(parseContext.index(), "[terms] filter does not support multiple fields"); + throw new 
QueryParsingException(parseContext, "[terms] filter does not support multiple fields"); } fieldName = currentFieldName; while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { Object value = parser.objectBytes(); if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for terms filter"); + throw new QueryParsingException(parseContext, "No value specified for terms filter"); } terms.add(value); } @@ -118,36 +117,33 @@ public class TermsFilterParser implements FilterParser { } else if ("routing".equals(currentFieldName)) { lookupRouting = parser.textOrNull(); } else { - throw new QueryParsingException(parseContext.index(), "[terms] filter does not support [" + currentFieldName + "] within lookup element"); + throw new QueryParsingException(parseContext, "[terms] filter does not support [" + currentFieldName + + "] within lookup element"); } } } if (lookupType == null) { - throw new QueryParsingException(parseContext.index(), "[terms] filter lookup element requires specifying the type"); + throw new QueryParsingException(parseContext, "[terms] filter lookup element requires specifying the type"); } if (lookupId == null) { - throw new QueryParsingException(parseContext.index(), "[terms] filter lookup element requires specifying the id"); + throw new QueryParsingException(parseContext, "[terms] filter lookup element requires specifying the id"); } if (lookupPath == null) { - throw new QueryParsingException(parseContext.index(), "[terms] filter lookup element requires specifying the path"); + throw new QueryParsingException(parseContext, "[terms] filter lookup element requires specifying the path"); } } else if (token.isValue()) { if (EXECUTION_KEY.equals(currentFieldName)) { // ignore } else if ("_name".equals(currentFieldName)) { filterName = parser.text(); - } else if ("_cache".equals(currentFieldName)) { - cache = parseContext.parseFilterCachePolicy(); - } else if ("_cache_key".equals(currentFieldName) || "_cacheKey".equals(currentFieldName)) { - cacheKey = new HashedBytesRef(parser.text()); } else { - throw new QueryParsingException(parseContext.index(), "[terms] filter does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[terms] filter does not support [" + currentFieldName + "]"); } } } if (fieldName == null) { - throw new QueryParsingException(parseContext.index(), "terms filter requires a field name, followed by array of terms"); + throw new QueryParsingException(parseContext, "terms filter requires a field name, followed by array of terms"); } FieldMapper fieldMapper = null; @@ -180,11 +176,7 @@ public class TermsFilterParser implements FilterParser { for (int i = 0; i < filterValues.length; i++) { filterValues[i] = BytesRefs.toBytesRef(terms.get(i)); } - filter = Queries.wrap(new TermsQuery(fieldName, filterValues)); - } - - if (cache != null) { - filter = parseContext.cacheFilter(filter, cacheKey, cache); + filter = new QueryWrapperFilter(new TermsQuery(fieldName, filterValues)); } if (filterName != null) { diff --git a/src/main/java/org/elasticsearch/index/query/TermsLookupFilterBuilder.java b/src/main/java/org/elasticsearch/index/query/TermsLookupFilterBuilder.java index 1c23c8f338c..1a9473ede40 100644 --- a/src/main/java/org/elasticsearch/index/query/TermsLookupFilterBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/TermsLookupFilterBuilder.java @@ -36,8 +36,6 @@ public class TermsLookupFilterBuilder extends BaseFilterBuilder { private String lookupPath; private 
Boolean lookupCache; - private Boolean cache; - private String cacheKey; private String filterName; public TermsLookupFilterBuilder(String name) { @@ -94,16 +92,6 @@ public class TermsLookupFilterBuilder extends BaseFilterBuilder { return this; } - public TermsLookupFilterBuilder cache(boolean cache) { - this.cache = cache; - return this; - } - - public TermsLookupFilterBuilder cacheKey(String cacheKey) { - this.cacheKey = cacheKey; - return this; - } - @Override public void doXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(TermsFilterParser.NAME); @@ -126,12 +114,6 @@ public class TermsLookupFilterBuilder extends BaseFilterBuilder { if (filterName != null) { builder.field("_name", filterName); } - if (cache != null) { - builder.field("_cache", cache); - } - if (cacheKey != null) { - builder.field("_cache_key", cacheKey); - } builder.endObject(); } diff --git a/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java b/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java index 15c9f18388e..dcf078d19b1 100644 --- a/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/TermsQueryParser.java @@ -75,13 +75,13 @@ public class TermsQueryParser implements QueryParser { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_ARRAY) { if (fieldName != null) { - throw new QueryParsingException(parseContext.index(), "[terms] query does not support multiple fields"); + throw new QueryParsingException(parseContext, "[terms] query does not support multiple fields"); } fieldName = currentFieldName; while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { Object value = parser.objectBytes(); if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for terms query"); + throw new QueryParsingException(parseContext, "No value specified for terms query"); } values.add(value); } @@ -97,15 +97,15 @@ public class TermsQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[terms] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[terms] query does not support [" + currentFieldName + "]"); } } else { - throw new QueryParsingException(parseContext.index(), "[terms] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[terms] query does not support [" + currentFieldName + "]"); } } if (fieldName == null) { - throw new QueryParsingException(parseContext.index(), "No field specified for terms query"); + throw new QueryParsingException(parseContext, "No field specified for terms query"); } FieldMapper mapper = null; diff --git a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java index a0f5834b44e..6c1b0e45aaa 100644 --- a/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/TopChildrenQueryParser.java @@ -20,23 +20,21 @@ package org.elasticsearch.index.query; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.internal.ParentFieldMapper; import org.elasticsearch.index.query.support.XContentStructure; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.search.child.ScoreType; import org.elasticsearch.index.search.child.TopChildrenQuery; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import java.io.IOException; -import static org.elasticsearch.index.query.QueryParserUtils.ensureNotDeleteByQuery; - /** * */ @@ -55,7 +53,6 @@ public class TopChildrenQueryParser implements QueryParser { @Override public Query parse(QueryParseContext parseContext) throws IOException, QueryParsingException { - ensureNotDeleteByQuery(NAME, parseContext); XContentParser parser = parseContext.parser(); boolean queryFound = false; @@ -81,7 +78,7 @@ public class TopChildrenQueryParser implements QueryParser { iq = new XContentStructure.InnerQuery(parseContext, childType == null ? null : new String[] {childType}); queryFound = true; } else { - throw new QueryParsingException(parseContext.index(), "[top_children] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[top_children] query does not support [" + currentFieldName + "]"); } } else if (token.isValue()) { if ("type".equals(currentFieldName)) { @@ -99,15 +96,15 @@ public class TopChildrenQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[top_children] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[top_children] query does not support [" + currentFieldName + "]"); } } } if (!queryFound) { - throw new QueryParsingException(parseContext.index(), "[top_children] requires 'query' field"); + throw new QueryParsingException(parseContext, "[top_children] requires 'query' field"); } if (childType == null) { - throw new QueryParsingException(parseContext.index(), "[top_children] requires 'type' field"); + throw new QueryParsingException(parseContext, "[top_children] requires 'type' field"); } Query innerQuery = iq.asQuery(childType); @@ -118,26 +115,26 @@ DocumentMapper childDocMapper = parseContext.mapperService().documentMapper(childType); if (childDocMapper == null) { - throw new QueryParsingException(parseContext.index(), "No mapping for for type [" + childType + "]"); + throw new QueryParsingException(parseContext, "No mapping for type [" + childType + "]"); } ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper(); if (!parentFieldMapper.active()) { - throw new QueryParsingException(parseContext.index(), "Type [" + childType + "] does not have parent mapping"); + throw new QueryParsingException(parseContext, "Type [" + childType + "] does not have parent mapping"); } String parentType = childDocMapper.parentFieldMapper().type(); BitDocIdSetFilter nonNestedDocsFilter = null; if (childDocMapper.hasNestedObjects()) { - nonNestedDocsFilter = parseContext.bitsetFilter(NonNestedDocsFilter.INSTANCE); + nonNestedDocsFilter = parseContext.bitsetFilter(Queries.newNonNestedFilter()); }
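/*
 * Three things happen to top_children in this file: the delete-by-query
 * guard (ensureNotDeleteByQuery) disappears, the ES-specific
 * NonNestedDocsFilter singleton is replaced by the
 * Queries.newNonNestedFilter() factory, and, just below, the custom
 * CustomQueryWrappingFilter gives way to Lucene's stock QueryWrapperFilter.
 * The nested-docs wiring now reads:
 *
 *     BitDocIdSetFilter nonNestedDocsFilter = null;
 *     if (childDocMapper.hasNestedObjects()) {
 *         nonNestedDocsFilter = parseContext.bitsetFilter(Queries.newNonNestedFilter());
 *     }
 */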
innerQuery.setBoost(boost); // wrap the query with type query - innerQuery = new FilteredQuery(innerQuery, parseContext.cacheFilter(childDocMapper.typeFilter(), null, parseContext.autoFilterCachePolicy())); + innerQuery = new FilteredQuery(innerQuery, childDocMapper.typeFilter()); ParentChildIndexFieldData parentChildIndexFieldData = parseContext.getForField(parentFieldMapper); TopChildrenQuery query = new TopChildrenQuery(parentChildIndexFieldData, innerQuery, childType, parentType, scoreType, factor, incrementalFactor, nonNestedDocsFilter); if (queryName != null) { - parseContext.addNamedFilter(queryName, new CustomQueryWrappingFilter(query)); + parseContext.addNamedFilter(queryName, new QueryWrapperFilter(query)); } return query; } diff --git a/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java b/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java index e4ae0b957e0..6c9e9523e76 100644 --- a/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/TypeFilterParser.java @@ -21,10 +21,10 @@ package org.elasticsearch.index.query; import org.apache.lucene.index.Term; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; @@ -50,15 +50,15 @@ public class TypeFilterParser implements FilterParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[type] filter should have a value field, and the type name"); + throw new QueryParsingException(parseContext, "[type] filter should have a value field, and the type name"); } String fieldName = parser.currentName(); if (!fieldName.equals("value")) { - throw new QueryParsingException(parseContext.index(), "[type] filter should have a value field, and the type name"); + throw new QueryParsingException(parseContext, "[type] filter should have a value field, and the type name"); } token = parser.nextToken(); if (token != XContentParser.Token.VALUE_STRING) { - throw new QueryParsingException(parseContext.index(), "[type] filter should have a value field, and the type name"); + throw new QueryParsingException(parseContext, "[type] filter should have a value field, and the type name"); } BytesRef type = parser.utf8Bytes(); // move to the next token @@ -68,10 +68,10 @@ public class TypeFilterParser implements FilterParser { //LUCENE 4 UPGRADE document mapper should use bytesref as well? 
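In the type filter that follows, the parseContext.cacheFilter(..., autoFilterCachePolicy()) wrapping is dropped and the filter is returned as-is, completing the same caching cleanup seen in the term and terms filters. For an unmapped type the parser now builds a plain Lucene wrapper; a sketch assuming the imports in that file, where type is the BytesRef read from the request:

    // QueryWrapperFilter is the stock Lucene adapter from Query to Filter.
    Filter filter = new QueryWrapperFilter(
            new TermQuery(new Term(TypeFieldMapper.NAME, type)));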
DocumentMapper documentMapper = parseContext.mapperService().documentMapper(type.utf8ToString()); if (documentMapper == null) { - filter = Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, type))); + filter = new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, type))); } else { filter = documentMapper.typeFilter(); } - return parseContext.cacheFilter(filter, null, parseContext.autoFilterCachePolicy()); + return filter; } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/query/WildcardQueryParser.java b/src/main/java/org/elasticsearch/index/query/WildcardQueryParser.java index 6a641e96219..a661c185878 100644 --- a/src/main/java/org/elasticsearch/index/query/WildcardQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/WildcardQueryParser.java @@ -52,7 +52,7 @@ public class WildcardQueryParser implements QueryParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[wildcard] query malformed, no field"); + throw new QueryParsingException(parseContext, "[wildcard] query malformed, no field"); } String fieldName = parser.currentName(); String rewriteMethod = null; @@ -78,7 +78,7 @@ public class WildcardQueryParser implements QueryParser { } else if ("_name".equals(currentFieldName)) { queryName = parser.text(); } else { - throw new QueryParsingException(parseContext.index(), "[wildcard] query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, "[wildcard] query does not support [" + currentFieldName + "]"); } } } @@ -89,7 +89,7 @@ public class WildcardQueryParser implements QueryParser { } if (value == null) { - throw new QueryParsingException(parseContext.index(), "No value specified for prefix query"); + throw new QueryParsingException(parseContext, "No value specified for prefix query"); } BytesRef valueBytes; diff --git a/src/main/java/org/elasticsearch/index/query/WrapperFilterParser.java b/src/main/java/org/elasticsearch/index/query/WrapperFilterParser.java index 2346d65943d..35ca8724453 100644 --- a/src/main/java/org/elasticsearch/index/query/WrapperFilterParser.java +++ b/src/main/java/org/elasticsearch/index/query/WrapperFilterParser.java @@ -48,11 +48,11 @@ public class WrapperFilterParser implements FilterParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[wrapper] filter malformed"); + throw new QueryParsingException(parseContext, "[wrapper] filter malformed"); } String fieldName = parser.currentName(); if (!fieldName.equals("filter")) { - throw new QueryParsingException(parseContext.index(), "[wrapper] filter malformed"); + throw new QueryParsingException(parseContext, "[wrapper] filter malformed"); } parser.nextToken(); diff --git a/src/main/java/org/elasticsearch/index/query/WrapperQueryParser.java b/src/main/java/org/elasticsearch/index/query/WrapperQueryParser.java index 3fc16d7af74..f7b98ad3dd5 100644 --- a/src/main/java/org/elasticsearch/index/query/WrapperQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/WrapperQueryParser.java @@ -48,11 +48,11 @@ public class WrapperQueryParser implements QueryParser { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new QueryParsingException(parseContext.index(), "[wrapper] query malformed"); + throw new QueryParsingException(parseContext, 
"[wrapper] query malformed"); } String fieldName = parser.currentName(); if (!fieldName.equals("query")) { - throw new QueryParsingException(parseContext.index(), "[wrapper] query malformed"); + throw new QueryParsingException(parseContext, "[wrapper] query malformed"); } parser.nextToken(); diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java index ce9f5231949..96aabb2cc44 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query.functionscore; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.MultiValueMode; @@ -48,7 +47,7 @@ public abstract class DecayFunctionBuilder extends ScoreFunctionBuilder { public DecayFunctionBuilder setDecay(double decay) { if (decay <= 0 || decay >= 1.0) { - throw new ElasticsearchIllegalStateException("scale weight parameter must be in range 0..1!"); + throw new IllegalStateException("scale weight parameter must be in range 0..1!"); } this.decay = decay; return this; @@ -89,4 +88,4 @@ public abstract class DecayFunctionBuilder extends ScoreFunctionBuilder { this.multiValueMode = MultiValueMode.fromString(multiValueMode.toUpperCase(Locale.ROOT)); return this; } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java index daf9aca1b4e..001bdf05854 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/DecayFunctionParser.java @@ -20,9 +20,7 @@ package org.elasticsearch.index.query.functionscore; import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.geo.GeoDistance; @@ -156,7 +154,7 @@ public abstract class DecayFunctionParser implements ScoreFunctionParser { // the doc later MapperService.SmartNameFieldMappers smartMappers = parseContext.smartFieldMappers(fieldName); if (smartMappers == null || !smartMappers.hasMapper()) { - throw new QueryParsingException(parseContext.index(), "Unknown field [" + fieldName + "]"); + throw new QueryParsingException(parseContext, "Unknown field [" + fieldName + "]"); } FieldMapper mapper = smartMappers.fieldMappers().mapper(); @@ -169,7 +167,7 @@ public abstract class DecayFunctionParser implements ScoreFunctionParser { } else if (mapper instanceof NumberFieldMapper) { return parseNumberVariable(fieldName, parser, parseContext, (NumberFieldMapper) mapper, mode); } else { - throw new QueryParsingException(parseContext.index(), "Field " + fieldName + " is of type " + mapper.fieldType() + throw new QueryParsingException(parseContext, "Field " + fieldName + " is of type " + mapper.fieldType() + ", but only numeric types are supported."); } } @@ -429,16 +427,16 @@ public abstract class DecayFunctionParser implements ScoreFunctionParser { super(CombineFunction.MULT); this.mode = 
mode; if (userSuppiedScale <= 0.0) { - throw new ElasticsearchIllegalArgumentException(FunctionScoreQueryParser.NAME + " : scale must be > 0.0."); + throw new IllegalArgumentException(FunctionScoreQueryParser.NAME + " : scale must be > 0.0."); } if (decay <= 0.0 || decay >= 1.0) { - throw new ElasticsearchIllegalArgumentException(FunctionScoreQueryParser.NAME + throw new IllegalArgumentException(FunctionScoreQueryParser.NAME + " : decay must be in the range [0..1]."); } this.scale = func.processScale(userSuppiedScale, decay); this.func = func; if (offset < 0.0d) { - throw new ElasticsearchIllegalArgumentException(FunctionScoreQueryParser.NAME + " : offset must be > 0.0"); + throw new IllegalArgumentException(FunctionScoreQueryParser.NAME + " : offset must be > 0.0"); } this.offset = offset; } @@ -463,12 +461,10 @@ public abstract class DecayFunctionParser implements ScoreFunctionParser { @Override public Explanation explainScore(int docId, Explanation subQueryScore) throws IOException { - ComplexExplanation ce = new ComplexExplanation(); - ce.setValue(CombineFunction.toFloat(score(docId, subQueryScore.getValue()))); - ce.setMatch(true); - ce.setDescription("Function for field " + getFieldName() + ":"); - ce.addDetail(func.explainFunction(getDistanceString(ctx, docId), distance.get(docId), scale)); - return ce; + return Explanation.match( + CombineFunction.toFloat(score(docId, subQueryScore.getValue())), + "Function for field " + getFieldName() + ":", + func.explainFunction(getDistanceString(ctx, docId), distance.get(docId), scale)); } }; } diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java b/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java index 53804abfe6c..f7856462bfa 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryBuilder.java @@ -19,9 +19,7 @@ package org.elasticsearch.index.query.functionscore; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.lucene.search.function.CombineFunction; -import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.BaseQueryBuilder; import org.elasticsearch.index.query.BoostableQueryBuilder; @@ -99,7 +97,7 @@ public class FunctionScoreQueryBuilder extends BaseQueryBuilder implements Boost */ public FunctionScoreQueryBuilder(ScoreFunctionBuilder scoreFunctionBuilder) { if (scoreFunctionBuilder == null) { - throw new ElasticsearchIllegalArgumentException("function_score: function must not be null"); + throw new IllegalArgumentException("function_score: function must not be null"); } queryBuilder = null; filterBuilder = null; @@ -115,7 +113,7 @@ public class FunctionScoreQueryBuilder extends BaseQueryBuilder implements Boost */ public FunctionScoreQueryBuilder add(FilterBuilder filter, ScoreFunctionBuilder scoreFunctionBuilder) { if (scoreFunctionBuilder == null) { - throw new ElasticsearchIllegalArgumentException("function_score: function must not be null"); + throw new IllegalArgumentException("function_score: function must not be null"); } this.filters.add(filter); this.scoreFunctions.add(scoreFunctionBuilder); @@ -129,7 +127,7 @@ public class FunctionScoreQueryBuilder extends BaseQueryBuilder implements Boost */ public FunctionScoreQueryBuilder add(ScoreFunctionBuilder 
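/*
 * The explainScore/explainFunction rework above tracks Lucene 5's immutable
 * Explanation API: instead of mutating a ComplexExplanation step by step,
 * the finished object is built in one factory call. A minimal sketch with
 * illustrative values only:
 *
 *     Explanation e = Explanation.match(
 *             0.42f,                                  // the computed score
 *             "Function for field location:",         // description
 *             Explanation.match(0.42f, "exp(- dist * 0.001)"));  // nested detail
 *
 * The same substitution repeats below in the exp, gauss and linear decay
 * parsers.
 */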
scoreFunctionBuilder) { if (scoreFunctionBuilder == null) { - throw new ElasticsearchIllegalArgumentException("function_score: function must not be null"); + throw new IllegalArgumentException("function_score: function must not be null"); } this.filters.add(null); this.scoreFunctions.add(scoreFunctionBuilder); @@ -227,4 +225,4 @@ public class FunctionScoreQueryBuilder extends BaseQueryBuilder implements Boost this.minScore = minScore; return this; } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryParser.java index 10d4c7f3d55..734ab2f7759 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/FunctionScoreQueryParser.java @@ -134,7 +134,7 @@ public class FunctionScoreQueryParser implements QueryParser { // we try to parse a score function. If there is no score // function for the current field name, // functionParserMapper.get() will throw an Exception. - scoreFunction = functionParserMapper.get(parseContext.index(), currentFieldName).parse(parseContext, parser); + scoreFunction = functionParserMapper.get(parseContext, currentFieldName).parse(parseContext, parser); } if (functionArrayFound) { String errorString = "Found \"functions\": [...] already, now encountering \"" + currentFieldName + "\"."; @@ -202,8 +202,8 @@ public class FunctionScoreQueryParser implements QueryParser { ScoreFunction scoreFunction = null; Float functionWeight = null; if (token != XContentParser.Token.START_OBJECT) { - throw new QueryParsingException(parseContext.index(), NAME + ": malformed query, expected a " - + XContentParser.Token.START_OBJECT + " while parsing functions but got a " + token); + throw new QueryParsingException(parseContext, NAME + ": malformed query, expected a " + XContentParser.Token.START_OBJECT + + " while parsing functions but got a " + token); } else { while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -217,7 +217,7 @@ public class FunctionScoreQueryParser implements QueryParser { // do not need to check null here, // functionParserMapper throws exception if parser // non-existent - ScoreFunctionParser functionParser = functionParserMapper.get(parseContext.index(), currentFieldName); + ScoreFunctionParser functionParser = functionParserMapper.get(parseContext, currentFieldName); scoreFunction = functionParser.parse(parseContext, parser); } } @@ -253,7 +253,7 @@ public class FunctionScoreQueryParser implements QueryParser { } else if ("first".equals(scoreMode)) { return FiltersFunctionScoreQuery.ScoreMode.First; } else { - throw new QueryParsingException(parseContext.index(), NAME + " illegal score_mode [" + scoreMode + "]"); + throw new QueryParsingException(parseContext, NAME + " illegal score_mode [" + scoreMode + "]"); } } @@ -261,7 +261,7 @@ public class FunctionScoreQueryParser implements QueryParser { String boostMode = parser.text(); CombineFunction cf = combineFunctionsMap.get(boostMode); if (cf == null) { - throw new QueryParsingException(parseContext.index(), NAME + " illegal boost_mode [" + boostMode + "]"); + throw new QueryParsingException(parseContext, NAME + " illegal boost_mode [" + boostMode + "]"); } return cf; } diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java 
b/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java index 4f7d9de390b..abe8b5c4e35 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/ScoreFunctionParserMapper.java @@ -20,9 +20,10 @@ package org.elasticsearch.index.query.functionscore; import com.google.common.collect.ImmutableMap; + import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.index.Index; +import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryParsingException; import java.util.Set; @@ -42,10 +43,10 @@ public class ScoreFunctionParserMapper { this.functionParsers = builder.immutableMap(); } - public ScoreFunctionParser get(Index index, String parserName) { + public ScoreFunctionParser get(QueryParseContext parseContext, String parserName) { ScoreFunctionParser functionParser = get(parserName); if (functionParser == null) { - throw new QueryParsingException(index, "No function with the name [" + parserName + "] is registered."); + throw new QueryParsingException(parseContext, "No function with the name [" + parserName + "] is registered.", null); } return functionParser; } diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionParser.java index 55b32c43a4d..bab04d4a1dc 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/exp/ExponentialDecayFunctionParser.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query.functionscore.exp; -import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; import org.elasticsearch.index.query.functionscore.DecayFunction; import org.elasticsearch.index.query.functionscore.DecayFunctionParser; @@ -49,10 +48,9 @@ public class ExponentialDecayFunctionParser extends DecayFunctionParser { @Override public Explanation explainFunction(String valueExpl, double value, double scale) { - ComplexExplanation ce = new ComplexExplanation(); - ce.setValue((float) evaluate(value, scale)); - ce.setDescription("exp(- " + valueExpl + " * " + -1 * scale + ")"); - return ce; + return Explanation.match( + (float) evaluate(value, scale), + "exp(- " + valueExpl + " * " + -1 * scale + ")"); } @Override diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/factor/FactorBuilder.java b/src/main/java/org/elasticsearch/index/query/functionscore/factor/FactorBuilder.java index 286b32c7f70..3a176c46877 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/factor/FactorBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/factor/FactorBuilder.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query.functionscore.factor; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.lucene.search.function.BoostScoreFunction; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.query.functionscore.ScoreFunctionBuilder; @@ -58,11 +57,11 @@ public class FactorBuilder extends ScoreFunctionBuilder { @Override public ScoreFunctionBuilder setWeight(float weight) { - throw new 
ElasticsearchIllegalArgumentException(BoostScoreFunction.BOOST_WEIGHT_ERROR_MESSAGE); + throw new IllegalArgumentException(BoostScoreFunction.BOOST_WEIGHT_ERROR_MESSAGE); } @Override public void buildWeight(XContentBuilder builder) throws IOException { //we do not want the weight to be written for boost_factor as it does not make sense to have it } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionBuilder.java b/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionBuilder.java index 34a2f8bbc67..5d38c5a5eb5 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionBuilder.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionBuilder.java @@ -33,6 +33,7 @@ import java.util.Locale; public class FieldValueFactorFunctionBuilder extends ScoreFunctionBuilder { private String field = null; private Float factor = null; + private Double missing = null; private FieldValueFactorFunction.Modifier modifier = null; public FieldValueFactorFunctionBuilder(String fieldName) { @@ -49,6 +50,14 @@ public class FieldValueFactorFunctionBuilder extends ScoreFunctionBuilder { return this; } + /** + * Value used instead of the field value for documents that don't have that field defined. + */ + public FieldValueFactorFunctionBuilder missing(double missing) { + this.missing = missing; + return this; + } + public FieldValueFactorFunctionBuilder modifier(FieldValueFactorFunction.Modifier modifier) { this.modifier = modifier; return this; @@ -65,6 +74,10 @@ public class FieldValueFactorFunctionBuilder extends ScoreFunctionBuilder { builder.field("factor", factor); } + if (missing != null) { + builder.field("missing", missing); + } + if (modifier != null) { builder.field("modifier", modifier.toString().toLowerCase(Locale.ROOT)); } diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java index 3426dcbef3c..90c4b953bed 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/fieldvaluefactor/FieldValueFactorFunctionParser.java @@ -41,7 +41,8 @@ import java.util.Locale; * "field_value_factor": { * "field": "myfield", * "factor": 1.5, - * "modifier": "square" + * "modifier": "square", + * "missing": 1 * } * } * @@ -56,6 +57,7 @@ public class FieldValueFactorFunctionParser implements ScoreFunctionParser { String field = null; float boostFactor = 1; FieldValueFactorFunction.Modifier modifier = FieldValueFactorFunction.Modifier.NONE; + Double missing = null; XContentParser.Token token; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { @@ -67,16 +69,18 @@ public class FieldValueFactorFunctionParser implements ScoreFunctionParser { boostFactor = parser.floatValue(); } else if ("modifier".equals(currentFieldName)) { modifier = FieldValueFactorFunction.Modifier.valueOf(parser.text().toUpperCase(Locale.ROOT)); + } else if ("missing".equals(currentFieldName)) { + missing = parser.doubleValue(); } else { - throw new QueryParsingException(parseContext.index(), NAMES[0] + " query does not support [" + 
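/*
 * field_value_factor gains a "missing" parameter: a stand-in value used for
 * documents that do not define the field at all, threaded from the builder
 * through the parser into FieldValueFactorFunction. A usage sketch built
 * from the methods added above:
 *
 *     new FieldValueFactorFunctionBuilder("popularity")
 *             .factor(1.5f)
 *             .modifier(FieldValueFactorFunction.Modifier.SQUARE)
 *             .missing(1);   // score as if popularity were 1 when the field is absent
 */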
currentFieldName + "]"); + throw new QueryParsingException(parseContext, NAMES[0] + " query does not support [" + currentFieldName + "]"); } } else if("factor".equals(currentFieldName) && (token == XContentParser.Token.START_ARRAY || token == XContentParser.Token.START_OBJECT)) { - throw new QueryParsingException(parseContext.index(), "[" + NAMES[0] + "] field 'factor' does not support lists or objects"); + throw new QueryParsingException(parseContext, "[" + NAMES[0] + "] field 'factor' does not support lists or objects"); } } if (field == null) { - throw new QueryParsingException(parseContext.index(), "[" + NAMES[0] + "] required field 'field' missing"); + throw new QueryParsingException(parseContext, "[" + NAMES[0] + "] required field 'field' missing"); } SearchContext searchContext = SearchContext.current(); @@ -84,7 +88,7 @@ public class FieldValueFactorFunctionParser implements ScoreFunctionParser { if (mapper == null) { throw new ElasticsearchException("Unable to find a field mapper for field [" + field + "]"); } - return new FieldValueFactorFunction(field, boostFactor, modifier, + return new FieldValueFactorFunction(field, boostFactor, modifier, missing, (IndexNumericFieldData)searchContext.fieldData().getForField(mapper)); } diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionParser.java index 26f77d37086..614050a8fbe 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/gauss/GaussDecayFunctionParser.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query.functionscore.gauss; -import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; import org.elasticsearch.index.query.functionscore.DecayFunction; import org.elasticsearch.index.query.functionscore.DecayFunctionParser; @@ -45,10 +44,9 @@ public class GaussDecayFunctionParser extends DecayFunctionParser { @Override public Explanation explainFunction(String valueExpl, double value, double scale) { - ComplexExplanation ce = new ComplexExplanation(); - ce.setValue((float) evaluate(value, scale)); - ce.setDescription("exp(-0.5*pow(" + valueExpl + ",2.0)/" + -1 * scale + ")"); - return ce; + return Explanation.match( + (float) evaluate(value, scale), + "exp(-0.5*pow(" + valueExpl + ",2.0)/" + -1 * scale + ")"); } @Override diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionParser.java index 05ecbbbfc04..215a7873ae3 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/lin/LinearDecayFunctionParser.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query.functionscore.lin; -import org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; import org.elasticsearch.index.query.functionscore.DecayFunction; import org.elasticsearch.index.query.functionscore.DecayFunctionParser; @@ -49,10 +48,9 @@ public class LinearDecayFunctionParser extends DecayFunctionParser { @Override public Explanation explainFunction(String valueExpl, double value, double scale) { - ComplexExplanation ce = new ComplexExplanation(); - ce.setValue((float) evaluate(value, scale)); - 
ce.setDescription("max(0.0, ((" + scale + " - " + valueExpl + ")/" + scale + ")"); - return ce; + return Explanation.match( + (float) evaluate(value, scale), + "max(0.0, ((" + scale + " - " + valueExpl + ")/" + scale + "))"); } @Override diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/random/RandomScoreFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/random/RandomScoreFunctionParser.java index 8bdc3074f3f..e4b26822d66 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/random/RandomScoreFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/random/RandomScoreFunctionParser.java @@ -21,6 +21,7 @@ package org.elasticsearch.index.query.functionscore.random; import com.google.common.primitives.Longs; + import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.lucene.search.function.RandomScoreFunction; import org.elasticsearch.common.lucene.search.function.ScoreFunction; @@ -66,15 +67,17 @@ public class RandomScoreFunctionParser implements ScoreFunctionParser { } else if (parser.numberType() == XContentParser.NumberType.LONG) { seed = Longs.hashCode(parser.longValue()); } else { - throw new QueryParsingException(parseContext.index(), "random_score seed must be an int, long or string, not '" + token.toString() + "'"); + throw new QueryParsingException(parseContext, "random_score seed must be an int, long or string, not '" + + token.toString() + "'"); } } else if (token == XContentParser.Token.VALUE_STRING) { seed = parser.text().hashCode(); } else { - throw new QueryParsingException(parseContext.index(), "random_score seed must be an int/long or string, not '" + token.toString() + "'"); + throw new QueryParsingException(parseContext, "random_score seed must be an int/long or string, not '" + + token.toString() + "'"); } } else { - throw new QueryParsingException(parseContext.index(), NAMES[0] + " query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, NAMES[0] + " query does not support [" + currentFieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java b/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java index f87962702b5..b01eaee3615 100644 --- a/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java +++ b/src/main/java/org/elasticsearch/index/query/functionscore/script/ScriptScoreFunctionParser.java @@ -28,7 +28,9 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryParsingException; import org.elasticsearch.index.query.functionscore.ScoreFunctionParser; -import org.elasticsearch.script.*; +import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptContext; +import org.elasticsearch.script.ScriptParameterParser; import org.elasticsearch.script.ScriptParameterParser.ScriptParameterValue; import org.elasticsearch.script.ScriptService; import org.elasticsearch.script.SearchScript; @@ -67,11 +69,11 @@ public class ScriptScoreFunctionParser implements ScoreFunctionParser { if ("params".equals(currentFieldName)) { vars = parser.map(); } else { - throw new QueryParsingException(parseContext.index(), NAMES[0] + " query does not support [" + 
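The random_score branches above reduce every accepted seed type to an int. The same normalization, restated as a tiny self-contained helper (name invented):

```java
import com.google.common.primitives.Longs;

final class RandomSeedSketch {
    // Mirrors the parser's branches: ints pass through, longs and strings are hashed.
    static int normalizeSeed(Object seed) {
        if (seed instanceof Integer) {
            return (Integer) seed;
        } else if (seed instanceof Long) {
            return Longs.hashCode((Long) seed); // Guava: folds 64 bits down to 32
        } else if (seed instanceof String) {
            return seed.hashCode();
        }
        throw new IllegalArgumentException(
                "random_score seed must be an int, long or string, not '" + seed + "'");
    }
}
```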
currentFieldName + "]"); } } else if (token.isValue()) { if (!scriptParameterParser.token(currentFieldName, token, parser)) { - throw new QueryParsingException(parseContext.index(), NAMES[0] + " query does not support [" + currentFieldName + "]"); + throw new QueryParsingException(parseContext, NAMES[0] + " query does not support [" + currentFieldName + "]"); } } } @@ -82,15 +84,15 @@ public class ScriptScoreFunctionParser implements ScoreFunctionParser { scriptType = scriptValue.scriptType(); } if (script == null) { - throw new QueryParsingException(parseContext.index(), NAMES[0] + " requires 'script' field"); + throw new QueryParsingException(parseContext, NAMES[0] + " requires 'script' field"); } SearchScript searchScript; try { - searchScript = parseContext.scriptService().search(parseContext.lookup(), scriptParameterParser.lang(), script, scriptType, ScriptContext.Standard.SEARCH, vars); + searchScript = parseContext.scriptService().search(parseContext.lookup(), new Script(scriptParameterParser.lang(), script, scriptType, vars), ScriptContext.Standard.SEARCH); return new ScriptScoreFunction(script, vars, searchScript); } catch (Exception e) { - throw new QueryParsingException(parseContext.index(), NAMES[0] + " the script could not be loaded", e); + throw new QueryParsingException(parseContext, NAMES[0] + " the script could not be loaded", e); } } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/query/support/InnerHitsQueryParserHelper.java b/src/main/java/org/elasticsearch/index/query/support/InnerHitsQueryParserHelper.java index 4dfba5fb3ec..ae839c41d1c 100644 --- a/src/main/java/org/elasticsearch/index/query/support/InnerHitsQueryParserHelper.java +++ b/src/main/java/org/elasticsearch/index/query/support/InnerHitsQueryParserHelper.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.query.support; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.xcontent.XContentParser; @@ -73,7 +72,7 @@ public class InnerHitsQueryParserHelper { } } } catch (Exception e) { - throw new QueryParsingException(parserContext.index(), "Failed to parse [_inner_hits]", e); + throw new QueryParsingException(parserContext, "Failed to parse [_inner_hits]", e); } return new Tuple<>(innerHitName, subSearchContext); } @@ -95,7 +94,7 @@ public class InnerHitsQueryParserHelper { scriptFieldsParseElement.parse(parser, subSearchContext); break; default: - throw new ElasticsearchIllegalArgumentException("Unknown key for a " + token + " for nested query: [" + fieldName + "]."); + throw new IllegalArgumentException("Unknown key for a " + token + " for nested query: [" + fieldName + "]."); } } else if (token == XContentParser.Token.START_ARRAY) { switch (fieldName) { @@ -115,7 +114,7 @@ public class InnerHitsQueryParserHelper { } break; default: - throw new ElasticsearchIllegalArgumentException("Unknown key for a " + token + " for nested query: [" + fieldName + "]."); + throw new IllegalArgumentException("Unknown key for a " + token + " for nested query: [" + fieldName + "]."); } } else if (token.isValue()) { switch (fieldName) { @@ -139,7 +138,7 @@ public class InnerHitsQueryParserHelper { subSearchContext.fieldNames().add(parser.text()); break; default: - throw new ElasticsearchIllegalArgumentException("Unknown key for a " + token + " for nested query: [" + fieldName + "]."); + throw new IllegalArgumentException("Unknown key for a " + token + " 
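Passing the parse context into QueryParsingException, as done throughout this patch, lets errors report the offending token's position instead of only the index name. A hypothetical mini-check sketching the idea (assuming XContentParser#getTokenLocation() and XContentLocation's public lineNumber/columnNumber fields):

```java
import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.common.xcontent.XContentParser;

final class LocationAwareErrors {
    // Invented helper: rejects an unknown field and points at where it sits in the body.
    static void rejectUnknownField(XContentParser parser, String fieldName) {
        XContentLocation location = parser.getTokenLocation();
        throw new IllegalArgumentException("unsupported field [" + fieldName + "] at line "
                + location.lineNumber + ", column " + location.columnNumber);
    }
}
```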
for nested query: [" + fieldName + "]."); } } } diff --git a/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java b/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java index 07142e5e1e6..c96fdb7e103 100644 --- a/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java +++ b/src/main/java/org/elasticsearch/index/query/support/NestedInnerQueryParseSupport.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.Filter; import org.apache.lucene.search.Query; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; @@ -31,7 +32,6 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.query.QueryParseContext; import org.elasticsearch.index.query.QueryParsingException; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -106,10 +106,10 @@ public class NestedInnerQueryParseSupport { return innerQuery; } else { if (path == null) { - throw new QueryParsingException(parseContext.index(), "[nested] requires 'path' field"); + throw new QueryParsingException(parseContext, "[nested] requires 'path' field"); } if (!queryFound) { - throw new QueryParsingException(parseContext.index(), "[nested] requires either 'query' or 'filter' field"); + throw new QueryParsingException(parseContext, "[nested] requires either 'query' or 'filter' field"); } XContentParser old = parseContext.parser(); @@ -135,10 +135,10 @@ public class NestedInnerQueryParseSupport { return innerFilter; } else { if (path == null) { - throw new QueryParsingException(parseContext.index(), "[nested] requires 'path' field"); + throw new QueryParsingException(parseContext, "[nested] requires 'path' field"); } if (!filterFound) { - throw new QueryParsingException(parseContext.index(), "[nested] requires either 'query' or 'filter' field"); + throw new QueryParsingException(parseContext, "[nested] requires either 'query' or 'filter' field"); } setPathLevel(); @@ -160,15 +160,15 @@ public class NestedInnerQueryParseSupport { this.path = path; MapperService.SmartNameObjectMapper smart = parseContext.smartObjectMapper(path); if (smart == null) { - throw new QueryParsingException(parseContext.index(), "[nested] failed to find nested object under path [" + path + "]"); + throw new QueryParsingException(parseContext, "[nested] failed to find nested object under path [" + path + "]"); } childDocumentMapper = smart.docMapper(); nestedObjectMapper = smart.mapper(); if (nestedObjectMapper == null) { - throw new QueryParsingException(parseContext.index(), "[nested] failed to find nested object under path [" + path + "]"); + throw new QueryParsingException(parseContext, "[nested] failed to find nested object under path [" + path + "]"); } if (!nestedObjectMapper.nested().isNested()) { - throw new QueryParsingException(parseContext.index(), "[nested] nested object under path [" + path + "] is not of nested type"); + throw new QueryParsingException(parseContext, "[nested] nested object under path [" + path + "] is not of nested type"); } } @@ -195,7 +195,7 @@ public class NestedInnerQueryParseSupport { 
private void setPathLevel() { ObjectMapper objectMapper = parseContext.nestedScope().getObjectMapper(); if (objectMapper == null) { - parentFilter = parseContext.bitsetFilter(NonNestedDocsFilter.INSTANCE); + parentFilter = parseContext.bitsetFilter(Queries.newNonNestedFilter()); } else { parentFilter = parseContext.bitsetFilter(objectMapper.nestedTypeFilter()); } diff --git a/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java b/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java index 86b9e371784..7d882090b37 100644 --- a/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java +++ b/src/main/java/org/elasticsearch/index/query/support/QueryParsers.java @@ -19,16 +19,9 @@ package org.elasticsearch.index.query.support; -import com.google.common.collect.ImmutableList; -import org.apache.lucene.search.Filter; -import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.MultiTermQuery; -import org.apache.lucene.search.Query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; + import org.elasticsearch.common.Nullable; -import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.MapperService; -import org.elasticsearch.index.query.QueryParseContext; /** * @@ -89,7 +82,7 @@ public final class QueryParsers { int size = Integer.parseInt(rewriteMethod.substring("topTerms".length())); return new MultiTermQuery.TopTermsScoringBooleanQueryRewrite(size); } - throw new ElasticsearchIllegalArgumentException("Failed to parse rewrite_method [" + rewriteMethod + "]"); + throw new IllegalArgumentException("Failed to parse rewrite_method [" + rewriteMethod + "]"); } } diff --git a/src/main/java/org/elasticsearch/index/search/FieldDataTermsFilter.java b/src/main/java/org/elasticsearch/index/search/FieldDataTermsFilter.java index b1bc01d599a..b44bde505c4 100644 --- a/src/main/java/org/elasticsearch/index/search/FieldDataTermsFilter.java +++ b/src/main/java/org/elasticsearch/index/search/FieldDataTermsFilter.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.search; import java.io.IOException; +import java.util.Objects; import com.carrotsearch.hppc.DoubleOpenHashSet; import com.carrotsearch.hppc.LongOpenHashSet; @@ -86,16 +87,19 @@ public abstract class FieldDataTermsFilter extends Filter { @Override public boolean equals(Object obj) { if (this == obj) return true; - if (obj == null || !(obj instanceof FieldDataTermsFilter)) return false; + if (super.equals(obj) == false) return false; FieldDataTermsFilter that = (FieldDataTermsFilter) obj; if (!fieldData.getFieldNames().indexName().equals(that.fieldData.getFieldNames().indexName())) return false; - if (this.hashCode() != obj.hashCode()) return false; return true; } @Override - public abstract int hashCode(); + public int hashCode() { + int h = super.hashCode(); + h = 31 * h + fieldData.getFieldNames().indexName().hashCode(); + return h; + } /** * Filters on non-numeric fields. @@ -109,11 +113,17 @@ public abstract class FieldDataTermsFilter extends Filter { this.terms = terms; } + @Override + public boolean equals(Object obj) { + if (super.equals(obj) == false) { + return false; + } + return Objects.equals(terms, ((BytesFieldDataFilter) obj).terms); + } + @Override public int hashCode() { - int hashcode = fieldData.getFieldNames().indexName().hashCode(); - hashcode += terms != null ? 
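The FieldDataTermsFilter equals/hashCode rework above (continued below) follows one pattern: let Lucene's Query-level checks run first via super, then mix local state in with the 31-multiplier idiom. A generic sketch of that contract, with an invented class and field:

```java
import org.apache.lucene.search.Filter;

// In Lucene 5, Filter extends Query, whose equals()/hashCode() already cover the
// concrete class and the boost, so subclasses only add their own fields on top.
abstract class FieldBoundFilterSketch extends Filter {
    private final String indexFieldName; // stand-in for fieldData.getFieldNames().indexName()

    FieldBoundFilterSketch(String indexFieldName) {
        this.indexFieldName = indexFieldName;
    }

    @Override
    public boolean equals(Object obj) {
        if (super.equals(obj) == false) {
            return false; // different class or different boost
        }
        return indexFieldName.equals(((FieldBoundFilterSketch) obj).indexFieldName);
    }

    @Override
    public int hashCode() {
        return 31 * super.hashCode() + indexFieldName.hashCode();
    }
}
```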
terms.hashCode() : 0; - return hashcode; + return 31 * super.hashCode() + Objects.hashCode(terms); } @Override @@ -166,11 +176,17 @@ public abstract class FieldDataTermsFilter extends Filter { this.terms = terms; } + @Override + public boolean equals(Object obj) { + if (super.equals(obj) == false) { + return false; + } + return Objects.equals(terms, ((LongsFieldDataFilter) obj).terms); + } + @Override public int hashCode() { - int hashcode = fieldData.getFieldNames().indexName().hashCode(); - hashcode += terms != null ? terms.hashCode() : 0; - return hashcode; + return 31 * super.hashCode() + Objects.hashCode(terms); } @Override @@ -225,11 +241,17 @@ public abstract class FieldDataTermsFilter extends Filter { this.terms = terms; } + @Override + public boolean equals(Object obj) { + if (super.equals(obj) == false) { + return false; + } + return Objects.equals(terms, ((DoublesFieldDataFilter) obj).terms); + } + @Override public int hashCode() { - int hashcode = fieldData.getFieldNames().indexName().hashCode(); - hashcode += terms != null ? terms.hashCode() : 0; - return hashcode; + return 31 * super.hashCode() + Objects.hashCode(terms); } @Override diff --git a/src/main/java/org/elasticsearch/index/search/MatchQuery.java b/src/main/java/org/elasticsearch/index/search/MatchQuery.java index 730719739cd..784f4ce18e3 100644 --- a/src/main/java/org/elasticsearch/index/search/MatchQuery.java +++ b/src/main/java/org/elasticsearch/index/search/MatchQuery.java @@ -25,8 +25,6 @@ import org.apache.lucene.queries.ExtendedCommonTermsQuery; import org.apache.lucene.search.*; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.util.QueryBuilder; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery; import org.elasticsearch.common.lucene.search.Queries; @@ -156,7 +154,7 @@ public class MatchQuery { } else { analyzer = parseContext.mapperService().analysisService().analyzer(this.analyzer); if (analyzer == null) { - throw new ElasticsearchIllegalArgumentException("No analyzer found for [" + this.analyzer + "]"); + throw new IllegalArgumentException("No analyzer found for [" + this.analyzer + "]"); } } return analyzer; @@ -204,7 +202,7 @@ public class MatchQuery { query = builder.createPhrasePrefixQuery(field, value.toString(), phraseSlop, maxExpansions); break; default: - throw new ElasticsearchIllegalStateException("No type found for [" + type + "]"); + throw new IllegalStateException("No type found for [" + type + "]"); } if (query == null) { @@ -303,4 +301,4 @@ public class MatchQuery { return new TermQuery(term); } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java b/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java index 0fdf44add48..f9a1e76034d 100644 --- a/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java +++ b/src/main/java/org/elasticsearch/index/search/MultiMatchQuery.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.FieldMapper; @@ -83,7 +82,7 @@ public 
class MultiMatchQuery extends MatchQuery { queryBuilder = new CrossFieldsQueryBuilder(tieBreaker); break; default: - throw new ElasticsearchIllegalStateException("No such type: " + type); + throw new IllegalStateException("No such type: " + type); } final List queries = queryBuilder.buildGroupedQueries(type, fieldNames, value, minimumShouldMatch); return queryBuilder.conbineGrouped(queries); @@ -269,4 +268,4 @@ public class MultiMatchQuery extends MatchQuery { protected boolean forceAnalyzeQueryString() { return this.queryBuilder == null ? super.forceAnalyzeQueryString() : this.queryBuilder.forceAnalyzeQueryString(); } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/index/search/NumericRangeFieldDataFilter.java b/src/main/java/org/elasticsearch/index/search/NumericRangeFieldDataFilter.java index 9c032c225fb..43b279c073b 100644 --- a/src/main/java/org/elasticsearch/index/search/NumericRangeFieldDataFilter.java +++ b/src/main/java/org/elasticsearch/index/search/NumericRangeFieldDataFilter.java @@ -85,7 +85,7 @@ public abstract class NumericRangeFieldDataFilter extends Filter { @Override public final boolean equals(Object o) { if (this == o) return true; - if (!(o instanceof NumericRangeFieldDataFilter)) return false; + if (super.equals(o) == false) return false; NumericRangeFieldDataFilter other = (NumericRangeFieldDataFilter) o; if (!this.indexFieldData.getFieldNames().indexName().equals(other.indexFieldData.getFieldNames().indexName()) @@ -101,7 +101,8 @@ public abstract class NumericRangeFieldDataFilter extends Filter { @Override public final int hashCode() { - int h = indexFieldData.getFieldNames().indexName().hashCode(); + int h = super.hashCode(); + h = 31 * h + indexFieldData.getFieldNames().indexName().hashCode(); h ^= (lowerVal != null) ? lowerVal.hashCode() : 550356204; h = (h << 1) | (h >>> 31); // rotate to distinguish lower from upper h ^= (upperVal != null) ? 
upperVal.hashCode() : -1674416163; diff --git a/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java b/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java index db1c15ddbb6..7b4faa3369e 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQuery.java @@ -38,9 +38,9 @@ import org.apache.lucene.search.XFilteredDocIdSetIterator; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.Bits; import org.apache.lucene.util.LongBitSet; +import org.elasticsearch.common.lucene.IndexCacheableQuery; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.lucene.search.NoopCollector; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.fielddata.AtomicParentChildFieldData; import org.elasticsearch.index.fielddata.IndexParentChildFieldData; import org.elasticsearch.search.internal.SearchContext; @@ -53,55 +53,41 @@ import java.util.Set; * */ // TODO: Remove me and move the logic to ChildrenQuery when needsScore=false -public class ChildrenConstantScoreQuery extends Query { +public class ChildrenConstantScoreQuery extends IndexCacheableQuery { private final IndexParentChildFieldData parentChildIndexFieldData; - private Query originalChildQuery; + private final Query childQuery; private final String parentType; private final String childType; private final Filter parentFilter; private final int shortCircuitParentDocSet; private final BitDocIdSetFilter nonNestedDocsFilter; - private Query rewrittenChildQuery; - private IndexReader rewriteIndexReader; - public ChildrenConstantScoreQuery(IndexParentChildFieldData parentChildIndexFieldData, Query childQuery, String parentType, String childType, Filter parentFilter, int shortCircuitParentDocSet, BitDocIdSetFilter nonNestedDocsFilter) { this.parentChildIndexFieldData = parentChildIndexFieldData; this.parentFilter = parentFilter; this.parentType = parentType; this.childType = childType; - this.originalChildQuery = childQuery; + this.childQuery = childQuery; this.shortCircuitParentDocSet = shortCircuitParentDocSet; this.nonNestedDocsFilter = nonNestedDocsFilter; } @Override - // See TopChildrenQuery#rewrite public Query rewrite(IndexReader reader) throws IOException { - if (rewrittenChildQuery == null) { - rewrittenChildQuery = originalChildQuery.rewrite(reader); - rewriteIndexReader = reader; + final Query childRewritten = childQuery.rewrite(reader); + if (childRewritten != childQuery) { + ChildrenConstantScoreQuery rewritten = new ChildrenConstantScoreQuery(parentChildIndexFieldData, childRewritten, parentType, childType, parentFilter, shortCircuitParentDocSet, nonNestedDocsFilter); + rewritten.setBoost(getBoost()); + return rewritten; } - return this; + return super.rewrite(reader); } @Override - public Query clone() { - ChildrenConstantScoreQuery q = (ChildrenConstantScoreQuery) super.clone(); - q.originalChildQuery = originalChildQuery.clone(); - if (q.rewrittenChildQuery != null) { - q.rewrittenChildQuery = rewrittenChildQuery.clone(); - } - return q; - } - - @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException { SearchContext sc = SearchContext.current(); IndexParentChildFieldData globalIfd = 
parentChildIndexFieldData.loadGlobal(searcher.getIndexReader()); - assert rewrittenChildQuery != null; - assert rewriteIndexReader == searcher.getIndexReader() : "not equal, rewriteIndexReader=" + rewriteIndexReader + " searcher.getIndexReader()=" + searcher.getIndexReader(); final long valueCount; List leaves = searcher.getIndexReader().leaves(); @@ -117,9 +103,9 @@ public class ChildrenConstantScoreQuery extends Query { return new BooleanQuery().createWeight(searcher, needsScores); } - Query childQuery = rewrittenChildQuery; IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader()); indexSearcher.setSimilarity(searcher.getSimilarity()); + indexSearcher.setQueryCache(null); ParentOrdCollector collector = new ParentOrdCollector(globalIfd, valueCount, parentType); indexSearcher.search(childQuery, collector); @@ -142,12 +128,12 @@ public class ChildrenConstantScoreQuery extends Query { if (this == obj) { return true; } - if (obj == null || obj.getClass() != this.getClass()) { + if (super.equals(obj) == false) { return false; } ChildrenConstantScoreQuery that = (ChildrenConstantScoreQuery) obj; - if (!originalChildQuery.equals(that.originalChildQuery)) { + if (!childQuery.equals(that.childQuery)) { return false; } if (!childType.equals(that.childType)) { @@ -156,24 +142,21 @@ public class ChildrenConstantScoreQuery extends Query { if (shortCircuitParentDocSet != that.shortCircuitParentDocSet) { return false; } - if (getBoost() != that.getBoost()) { - return false; - } return true; } @Override public int hashCode() { - int result = originalChildQuery.hashCode(); + int result = super.hashCode(); + result = 31 * result + childQuery.hashCode(); result = 31 * result + childType.hashCode(); result = 31 * result + shortCircuitParentDocSet; - result = 31 * result + Float.floatToIntBits(getBoost()); return result; } @Override public String toString(String field) { - return "child_filter[" + childType + "/" + parentType + "](" + originalChildQuery + ')'; + return "child_filter[" + childType + "/" + parentType + "](" + childQuery + ')'; } private final class ParentWeight extends Weight { @@ -202,7 +185,7 @@ public class ChildrenConstantScoreQuery extends Query { @Override public Explanation explain(LeafReaderContext context, int doc) throws IOException { - return new Explanation(getBoost(), "not implemented yet..."); + return Explanation.match(getBoost(), "not implemented yet..."); } @Override diff --git a/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java b/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java index e6f3069818d..18f004f7133 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/ChildrenQuery.java @@ -36,14 +36,13 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.search.XFilteredDocIdSetIterator; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.Bits; -import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.ToStringUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.lucene.IndexCacheableQuery; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.lucene.search.NoopCollector; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.util.BigArrays; import 
org.elasticsearch.common.util.FloatArray; import org.elasticsearch.common.util.IntArray; @@ -65,28 +64,25 @@ import java.util.Set; * all parent documents having the same uid value that is collected in the first phase are emitted as hit including * a score based on the aggregated child scores and score type. */ -public class ChildrenQuery extends Query { +public final class ChildrenQuery extends IndexCacheableQuery { protected final ParentChildIndexFieldData ifd; protected final String parentType; protected final String childType; protected final Filter parentFilter; protected final ScoreType scoreType; - protected Query originalChildQuery; + protected Query childQuery; protected final int minChildren; protected final int maxChildren; protected final int shortCircuitParentDocSet; protected final BitDocIdSetFilter nonNestedDocsFilter; - protected Query rewrittenChildQuery; - protected IndexReader rewriteIndexReader; - public ChildrenQuery(ParentChildIndexFieldData ifd, String parentType, String childType, Filter parentFilter, Query childQuery, ScoreType scoreType, int minChildren, int maxChildren, int shortCircuitParentDocSet, BitDocIdSetFilter nonNestedDocsFilter) { this.ifd = ifd; this.parentType = parentType; this.childType = childType; this.parentFilter = parentFilter; - this.originalChildQuery = childQuery; + this.childQuery = childQuery; this.scoreType = scoreType; this.shortCircuitParentDocSet = shortCircuitParentDocSet; this.nonNestedDocsFilter = nonNestedDocsFilter; @@ -95,25 +91,33 @@ public class ChildrenQuery extends Query { this.maxChildren = maxChildren; } + @Override + public Query rewrite(IndexReader reader) throws IOException { + final Query childRewritten = childQuery.rewrite(reader); + if (childRewritten != childQuery) { + Query rewritten = new ChildrenQuery(ifd, parentType, childType, parentFilter, childRewritten, scoreType, minChildren, maxChildren, shortCircuitParentDocSet, nonNestedDocsFilter); + rewritten.setBoost(getBoost()); + return rewritten; + } + return super.rewrite(reader); + } + @Override public boolean equals(Object obj) { if (this == obj) { return true; } - if (obj == null || obj.getClass() != this.getClass()) { + if (super.equals(obj) == false) { return false; } ChildrenQuery that = (ChildrenQuery) obj; - if (!originalChildQuery.equals(that.originalChildQuery)) { + if (!childQuery.equals(that.childQuery)) { return false; } if (!childType.equals(that.childType)) { return false; } - if (getBoost() != that.getBoost()) { - return false; - } if (minChildren != that.minChildren) { return false; } @@ -125,9 +129,9 @@ public class ChildrenQuery extends Query { @Override public int hashCode() { - int result = originalChildQuery.hashCode(); + int result = super.hashCode(); + result = 31 * result + childQuery.hashCode(); result = 31 * result + childType.hashCode(); - result = 31 * result + Float.floatToIntBits(getBoost()); result = 31 * result + minChildren; result = 31 * result + maxChildren; return result; @@ -137,36 +141,12 @@ public class ChildrenQuery extends Query { public String toString(String field) { int max = maxChildren == 0 ? 
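ChildrenConstantScoreQuery and ChildrenQuery above (and the parent-side queries later in this patch) now share a single rewrite discipline: rewrite the wrapped query and, if it changed, return a fresh copy instead of mutating this. A skeletal sketch of the idiom (names invented, wrapper details elided):

```java
import java.io.IOException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.Query;

// Invented minimal wrapper showing the rewrite-to-new-instance idiom that replaces
// the old rewrittenChildQuery/rewriteIndexReader mutable fields.
abstract class WrappingQuerySketch extends Query {
    protected final Query inner;

    WrappingQuerySketch(Query inner) {
        this.inner = inner;
    }

    // Subclasses say how to rebuild themselves around a rewritten inner query.
    protected abstract WrappingQuerySketch copyWith(Query rewrittenInner);

    @Override
    public Query rewrite(IndexReader reader) throws IOException {
        Query rewritten = inner.rewrite(reader);
        if (rewritten != inner) {
            WrappingQuerySketch copy = copyWith(rewritten);
            copy.setBoost(getBoost()); // boost lives on the Query base class
            return copy;
        }
        return super.rewrite(reader);
    }
}
```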
Integer.MAX_VALUE : maxChildren; return "ChildrenQuery[min(" + Integer.toString(minChildren) + ") max(" + Integer.toString(max) + ")of " + childType + "/" - + parentType + "](" + originalChildQuery.toString(field) + ')' + ToStringUtils.boost(getBoost()); + + parentType + "](" + childQuery.toString(field) + ')' + ToStringUtils.boost(getBoost()); } @Override - // See TopChildrenQuery#rewrite - public Query rewrite(IndexReader reader) throws IOException { - if (rewrittenChildQuery == null) { - rewriteIndexReader = reader; - rewrittenChildQuery = originalChildQuery.rewrite(reader); - } - return this; - } - - @Override - public Query clone() { - ChildrenQuery q = (ChildrenQuery) super.clone(); - q.originalChildQuery = originalChildQuery.clone(); - if (q.rewrittenChildQuery != null) { - q.rewrittenChildQuery = rewrittenChildQuery.clone(); - } - return q; - } - - @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException { SearchContext sc = SearchContext.current(); - assert rewrittenChildQuery != null; - assert rewriteIndexReader == searcher.getIndexReader() : "not equal, rewriteIndexReader=" + rewriteIndexReader - + " searcher.getIndexReader()=" + searcher.getIndexReader(); - final Query childQuery = rewrittenChildQuery; IndexParentChildFieldData globalIfd = ifd.loadGlobal(searcher.getIndexReader()); if (globalIfd == null) { @@ -175,6 +155,7 @@ public class ChildrenQuery extends Query { } IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader()); indexSearcher.setSimilarity(searcher.getSimilarity()); + indexSearcher.setQueryCache(null); boolean abort = true; long numFoundParents; @@ -232,7 +213,7 @@ public class ChildrenQuery extends Query { } else { parentFilter = this.parentFilter; } - return new ParentWeight(this, rewrittenChildQuery.createWeight(searcher, needsScores), parentFilter, numFoundParents, collector, minChildren, + return new ParentWeight(this, childQuery.createWeight(searcher, needsScores), parentFilter, numFoundParents, collector, minChildren, maxChildren); } @@ -264,7 +245,7 @@ public class ChildrenQuery extends Query { @Override public Explanation explain(LeafReaderContext context, int doc) throws IOException { - return new Explanation(getBoost(), "not implemented yet..."); + return Explanation.match(getBoost(), "not implemented yet..."); } @Override @@ -380,7 +361,7 @@ public class ChildrenQuery extends Query { } @Override - public void close() throws ElasticsearchException { + public void close() { Releasables.close(parentIdxs); } } @@ -406,7 +387,7 @@ public class ChildrenQuery extends Query { } @Override - public void close() throws ElasticsearchException { + public void close() { Releasables.close(parentIdxs, scores); } } @@ -429,7 +410,7 @@ public class ChildrenQuery extends Query { } @Override - public void close() throws ElasticsearchException { + public void close() { Releasables.close(parentIdxs, scores, occurrences); } } @@ -455,7 +436,7 @@ public class ChildrenQuery extends Query { } @Override - public void close() throws ElasticsearchException { + public void close() { Releasables.close(parentIdxs, occurrences); } } diff --git a/src/main/java/org/elasticsearch/index/search/child/CustomQueryWrappingFilter.java b/src/main/java/org/elasticsearch/index/search/child/CustomQueryWrappingFilter.java deleted file mode 100644 index 2bdf5ff87b2..00000000000 --- 
a/src/main/java/org/elasticsearch/index/search/child/CustomQueryWrappingFilter.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.index.search.child; - -import org.apache.lucene.index.LeafReader; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.*; -import org.apache.lucene.util.Bits; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.common.lease.Releasable; -import org.elasticsearch.common.lucene.search.NoCacheFilter; -import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.internal.SearchContext.Lifetime; - -import java.io.IOException; -import java.util.IdentityHashMap; - -/** - * Forked from {@link QueryWrapperFilter} to make sure the weight is only created once. - * This filter should never be cached! This filter only exists for internal usage. - * - * @elasticsearch.internal - */ -public class CustomQueryWrappingFilter extends NoCacheFilter implements Releasable { - - private final Query query; - - private IndexSearcher searcher; - private IdentityHashMap docIdSets; - - /** Constructs a filter which only matches documents matching - * query. - */ - public CustomQueryWrappingFilter(Query query) { - if (query == null) - throw new NullPointerException("Query may not be null"); - this.query = query; - } - - /** returns the inner Query */ - public final Query getQuery() { - return query; - } - - @Override - public DocIdSet getDocIdSet(final LeafReaderContext context, final Bits acceptDocs) throws IOException { - final SearchContext searchContext = SearchContext.current(); - if (docIdSets == null) { - assert searcher == null; - IndexSearcher searcher = searchContext.searcher(); - docIdSets = new IdentityHashMap<>(); - this.searcher = searcher; - searchContext.addReleasable(this, Lifetime.COLLECTION); - - final Weight weight = searcher.createNormalizedWeight(query, false); - for (final LeafReaderContext leaf : searcher.getTopReaderContext().leaves()) { - final DocIdSet set = new DocIdSet() { - @Override - public DocIdSetIterator iterator() throws IOException { - return weight.scorer(leaf, null); - } - @Override - public boolean isCacheable() { return false; } - - @Override - public long ramBytesUsed() { - return 0; - } - }; - docIdSets.put(leaf.reader(), set); - } - } else { - assert searcher == SearchContext.current().searcher(); - } - final DocIdSet set = docIdSets.get(context.reader()); - return BitsFilteredDocIdSet.wrap(set, acceptDocs); - } - - @Override - public void close() throws ElasticsearchException { - // We need to clear the docIdSets, otherwise this is leaved unused - // DocIdSets around and can potentially become a memory leak. 
- docIdSets = null; - searcher = null; - } - - @Override - public String toString(String field) { - return "CustomQueryWrappingFilter(" + query + ")"; - } - - @Override - public boolean equals(Object o) { - if (o == this) { - return true; - } - if (o != null && o instanceof CustomQueryWrappingFilter && - this.query.equals(((CustomQueryWrappingFilter)o).query)) { - return true; - } - - return false; - } - - @Override - public int hashCode() { - return query.hashCode() ^ 0x823D64C9; - } - - /** @return Whether {@link CustomQueryWrappingFilter} should be used. */ - public static boolean shouldUseCustomQueryWrappingFilter(Query query) { - if (query instanceof TopChildrenQuery || query instanceof ChildrenConstantScoreQuery - || query instanceof ChildrenQuery || query instanceof ParentConstantScoreQuery - || query instanceof ParentQuery) { - return true; - } else { - return false; - } - } -} diff --git a/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java b/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java index 3617ab29a89..5d2d1101ff7 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/ParentConstantScoreQuery.java @@ -22,12 +22,21 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.index.Term; -import org.apache.lucene.search.*; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.DocIdSet; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.Filter; +import org.apache.lucene.search.FilteredDocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.apache.lucene.util.LongBitSet; +import org.elasticsearch.common.lucene.IndexCacheableQuery; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.lucene.search.NoopCollector; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.fielddata.AtomicParentChildFieldData; import org.elasticsearch.index.fielddata.IndexParentChildFieldData; import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; @@ -39,48 +48,34 @@ import java.util.Set; /** * A query that only return child documents that are linked to the parent documents that matched with the inner query. 
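With query caching handled by the IndexSearcher itself, the NoCacheFilter workaround above can simply be deleted; the pre-collection phases instead run on a private searcher with caching disabled, which is what the repeated setQueryCache(null) calls in this patch do. That setup, extracted into a sketch:

```java
import org.apache.lucene.search.IndexSearcher;

final class UncachedSearcherSketch {
    // Build a throwaway searcher over the same reader for the pre-collection phase.
    static IndexSearcher uncachedView(IndexSearcher searcher) {
        IndexSearcher inner = new IndexSearcher(searcher.getIndexReader());
        inner.setSimilarity(searcher.getSimilarity());
        inner.setQueryCache(null); // these stateful per-search lookups must never be cached
        return inner;
    }
}
```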
*/ -public class ParentConstantScoreQuery extends Query { +public class ParentConstantScoreQuery extends IndexCacheableQuery { private final ParentChildIndexFieldData parentChildIndexFieldData; - private Query originalParentQuery; + private Query parentQuery; private final String parentType; private final Filter childrenFilter; - private Query rewrittenParentQuery; - private IndexReader rewriteIndexReader; - public ParentConstantScoreQuery(ParentChildIndexFieldData parentChildIndexFieldData, Query parentQuery, String parentType, Filter childrenFilter) { this.parentChildIndexFieldData = parentChildIndexFieldData; - this.originalParentQuery = parentQuery; + this.parentQuery = parentQuery; this.parentType = parentType; this.childrenFilter = childrenFilter; } @Override - // See TopChildrenQuery#rewrite public Query rewrite(IndexReader reader) throws IOException { - if (rewrittenParentQuery == null) { - rewrittenParentQuery = originalParentQuery.rewrite(reader); - rewriteIndexReader = reader; + Query parentRewritten = parentQuery.rewrite(reader); + if (parentRewritten != parentQuery) { + Query rewritten = new ParentConstantScoreQuery(parentChildIndexFieldData, parentRewritten, parentType, childrenFilter); + rewritten.setBoost(getBoost()); + return rewritten; } - return this; + return super.rewrite(reader); } @Override - public Query clone() { - ParentConstantScoreQuery q = (ParentConstantScoreQuery) super.clone(); - q.originalParentQuery = originalParentQuery.clone(); - if (q.rewrittenParentQuery != null) { - q.rewrittenParentQuery = rewrittenParentQuery.clone(); - } - return q; - } - - @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException { IndexParentChildFieldData globalIfd = parentChildIndexFieldData.loadGlobal(searcher.getIndexReader()); - assert rewrittenParentQuery != null; - assert rewriteIndexReader == searcher.getIndexReader() : "not equal, rewriteIndexReader=" + rewriteIndexReader + " searcher.getIndexReader()=" + searcher.getIndexReader(); final long maxOrd; List leaves = searcher.getIndexReader().leaves(); @@ -96,10 +91,10 @@ public class ParentConstantScoreQuery extends Query { return new BooleanQuery().createWeight(searcher, needsScores); } - final Query parentQuery = rewrittenParentQuery; ParentOrdsCollector collector = new ParentOrdsCollector(globalIfd, maxOrd, parentType); IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader()); indexSearcher.setSimilarity(searcher.getSimilarity()); + indexSearcher.setQueryCache(null); indexSearcher.search(parentQuery, collector); if (collector.parentCount() == 0) { @@ -111,9 +106,9 @@ public class ParentConstantScoreQuery extends Query { @Override public int hashCode() { - int result = originalParentQuery.hashCode(); + int result = super.hashCode(); + result = 31 * result + parentQuery.hashCode(); result = 31 * result + parentType.hashCode(); - result = 31 * result + Float.floatToIntBits(getBoost()); return result; } @@ -122,26 +117,23 @@ public class ParentConstantScoreQuery extends Query { if (this == obj) { return true; } - if (obj == null || obj.getClass() != this.getClass()) { + if (super.equals(obj) == false) { return false; } ParentConstantScoreQuery that = (ParentConstantScoreQuery) obj; - if (!originalParentQuery.equals(that.originalParentQuery)) { + if (!parentQuery.equals(that.parentQuery)) { return false; } if (!parentType.equals(that.parentType)) { return false; } - 
if (this.getBoost() != that.getBoost()) { - return false; - } return true; } @Override public String toString(String field) { - return "parent_filter[" + parentType + "](" + originalParentQuery + ')'; + return "parent_filter[" + parentType + "](" + parentQuery + ')'; } private final class ChildrenWeight extends Weight { @@ -166,7 +158,7 @@ public class ParentConstantScoreQuery extends Query { @Override public Explanation explain(LeafReaderContext context, int doc) throws IOException { - return new Explanation(getBoost(), "not implemented yet..."); + return Explanation.match(getBoost(), "not implemented yet..."); } @Override diff --git a/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java b/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java index 0b437a83b9e..cc34da404bb 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java +++ b/src/main/java/org/elasticsearch/index/search/child/ParentIdsFilter.java @@ -29,6 +29,7 @@ import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.BitDocIdSet; @@ -40,7 +41,6 @@ import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LongBitSet; import org.apache.lucene.util.SparseFixedBitSet; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.util.BytesRefHash; import org.elasticsearch.common.util.LongHash; import org.elasticsearch.index.mapper.Uid; @@ -66,9 +66,9 @@ final class ParentIdsFilter extends Filter { BooleanQuery bq = new BooleanQuery(); bq.add(new TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id))), Occur.MUST); bq.add(nonNestedDocsFilter, Occur.MUST); - return Queries.wrap(bq); + return new QueryWrapperFilter(bq); } else { - return Queries.wrap(new TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)))); + return new QueryWrapperFilter(new TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)))); } } else { BytesRefHash parentIds= null; @@ -98,9 +98,9 @@ final class ParentIdsFilter extends Filter { BooleanQuery bq = new BooleanQuery(); bq.add(new TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id))), Occur.MUST); bq.add(nonNestedDocsFilter, Occur.MUST); - return Queries.wrap(bq); + return new QueryWrapperFilter(bq); } else { - return Queries.wrap(new TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)))); + return new QueryWrapperFilter(new TermQuery(new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(parentType, id)))); } } else { BytesRefHash parentIds = null; @@ -195,4 +195,24 @@ final class ParentIdsFilter extends Filter { public String toString(String field) { return "parentsFilter(type=" + parentTypeBr.utf8ToString() + ")"; } + + @Override + public boolean equals(Object obj) { + if (super.equals(obj) == false) { + return false; + } + ParentIdsFilter other = (ParentIdsFilter) obj; + return parentTypeBr.equals(other.parentTypeBr) + && parentIds.equals(other.parentIds) + && nonNestedDocsFilter.equals(other.nonNestedDocsFilter); + } + + @Override + public int hashCode() { + int h = super.hashCode(); + h = 31 * h + 
parentIds.hashCode(); + h = 31 * h + nonNestedDocsFilter.hashCode(); + return h; + } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java b/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java index 5b9f22ace70..ec3ed4862e8 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/ParentQuery.java @@ -18,17 +18,29 @@ */ package org.elasticsearch.index.search.child; -import org.apache.lucene.index.*; -import org.apache.lucene.search.*; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedDocValues; +import org.apache.lucene.index.SortedSetDocValues; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.BooleanQuery; +import org.apache.lucene.search.Collector; +import org.apache.lucene.search.DocIdSet; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.Filter; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.LeafCollector; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; -import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.ToStringUtils; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.lease.Releasable; import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.lucene.IndexCacheableQuery; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.lucene.search.NoopCollector; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.FloatArray; import org.elasticsearch.common.util.LongHash; @@ -45,19 +57,16 @@ import java.util.Set; * connects the matching parent docs to the related child documents * using the {@link ParentChildIndexFieldData}. 
*/ -public class ParentQuery extends Query { +public class ParentQuery extends IndexCacheableQuery { private final ParentChildIndexFieldData parentChildIndexFieldData; - private Query originalParentQuery; + private Query parentQuery; private final String parentType; private final Filter childrenFilter; - private Query rewrittenParentQuery; - private IndexReader rewriteIndexReader; - public ParentQuery(ParentChildIndexFieldData parentChildIndexFieldData, Query parentQuery, String parentType, Filter childrenFilter) { this.parentChildIndexFieldData = parentChildIndexFieldData; - this.originalParentQuery = parentQuery; + this.parentQuery = parentQuery; this.parentType = parentType; this.childrenFilter = childrenFilter; } @@ -67,26 +76,24 @@ public class ParentQuery extends Query { if (this == obj) { return true; } - if (obj == null || obj.getClass() != this.getClass()) { + if (super.equals(obj) == false) { return false; } ParentQuery that = (ParentQuery) obj; - if (!originalParentQuery.equals(that.originalParentQuery)) { + if (!parentQuery.equals(that.parentQuery)) { return false; } if (!parentType.equals(that.parentType)) { return false; } - if (getBoost() != that.getBoost()) { - return false; - } return true; } @Override public int hashCode() { - int result = originalParentQuery.hashCode(); + int result = super.hashCode(); + result = 31 * result + parentQuery.hashCode(); result = 31 * result + parentType.hashCode(); result = 31 * result + Float.floatToIntBits(getBoost()); return result; @@ -94,31 +101,22 @@ public class ParentQuery extends Query { @Override public String toString(String field) { - return "ParentQuery[" + parentType + "](" + originalParentQuery.toString(field) + ')' + ToStringUtils.boost(getBoost()); + return "ParentQuery[" + parentType + "](" + parentQuery.toString(field) + ')' + ToStringUtils.boost(getBoost()); } @Override - // See TopChildrenQuery#rewrite public Query rewrite(IndexReader reader) throws IOException { - if (rewrittenParentQuery == null) { - rewriteIndexReader = reader; - rewrittenParentQuery = originalParentQuery.rewrite(reader); + Query parentRewritten = parentQuery.rewrite(reader); + if (parentRewritten != parentQuery) { + Query rewritten = new ParentQuery(parentChildIndexFieldData, parentRewritten, parentType, childrenFilter); + rewritten.setBoost(getBoost()); + return rewritten; } - return this; + return super.rewrite(reader); } @Override - public Query clone() { - ParentQuery q = (ParentQuery) super.clone(); - q.originalParentQuery = originalParentQuery.clone(); - if (q.rewrittenParentQuery != null) { - q.rewrittenParentQuery = rewrittenParentQuery.clone(); - } - return q; - } - - @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException { SearchContext sc = SearchContext.current(); ChildWeight childWeight; boolean releaseCollectorResource = true; @@ -130,12 +128,10 @@ public class ParentQuery extends Query { } try { - assert rewrittenParentQuery != null; - assert rewriteIndexReader == searcher.getIndexReader() : "not equal, rewriteIndexReader=" + rewriteIndexReader + " searcher.getIndexReader()=" + searcher.getIndexReader(); - final Query parentQuery = rewrittenParentQuery; collector = new ParentOrdAndScoreCollector(sc, globalIfd, parentType); IndexSearcher indexSearcher = new IndexSearcher(sc.searcher().getIndexReader()); indexSearcher.setSimilarity(searcher.getSimilarity()); + indexSearcher.setQueryCache(null); 
indexSearcher.search(parentQuery, collector); if (collector.parentCount() == 0) { return new BooleanQuery().createWeight(searcher, needsScores); @@ -152,7 +148,7 @@ public class ParentQuery extends Query { return childWeight; } - private static class ParentOrdAndScoreCollector extends NoopCollector implements Releasable { + private static class ParentOrdAndScoreCollector implements Collector, Releasable { private final LongHash parentIdxs; private FloatArray scores; @@ -160,9 +156,6 @@ public class ParentQuery extends Query { private final BigArrays bigArrays; private final String parentType; - private Scorer scorer; - private SortedDocValues values; - ParentOrdAndScoreCollector(SearchContext searchContext, IndexParentChildFieldData globalIfd, String parentType) { this.bigArrays = searchContext.bigArrays(); this.parentIdxs = new LongHash(512, bigArrays); @@ -172,34 +165,40 @@ public class ParentQuery extends Query { } @Override - public void collect(int doc) throws IOException { - // It can happen that for particular segment no document exist for an specific type. This prevents NPE - if (values != null) { - long globalOrdinal = values.getOrd(doc); - if (globalOrdinal != SortedSetDocValues.NO_MORE_ORDS) { - long parentIdx = parentIdxs.add(globalOrdinal); - if (parentIdx >= 0) { - scores = bigArrays.grow(scores, parentIdx + 1); - scores.set(parentIdx, scorer.score()); - } else { - assert false : "parent id should only match once, since there can only be one parent doc"; + public boolean needsScores() { + return true; + } + + @Override + public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + final SortedDocValues values = globalIfd.load(context).getOrdinalsValues(parentType); + if (values == null) { + return NoopCollector.NOOP_COLLECTOR; + } + return new LeafCollector() { + Scorer scorer; + @Override + public void setScorer(Scorer scorer) throws IOException { + this.scorer = scorer; + } + @Override + public void collect(int doc) throws IOException { + long globalOrdinal = values.getOrd(doc); + if (globalOrdinal != SortedSetDocValues.NO_MORE_ORDS) { + long parentIdx = parentIdxs.add(globalOrdinal); + if (parentIdx >= 0) { + scores = bigArrays.grow(scores, parentIdx + 1); + scores.set(parentIdx, scorer.score()); + } else { + assert false : "parent id should only match once, since there can only be one parent doc"; + } } } - } + }; } @Override - public void setScorer(Scorer scorer) throws IOException { - this.scorer = scorer; - } - - @Override - protected void doSetNextReader(LeafReaderContext context) throws IOException { - values = globalIfd.load(context).getOrdinalsValues(parentType); - } - - @Override - public void close() throws ElasticsearchException { + public void close() { Releasables.close(parentIdxs, scores); } @@ -232,7 +231,7 @@ public class ParentQuery extends Query { @Override public Explanation explain(LeafReaderContext context, int doc) throws IOException { - return new Explanation(getBoost(), "not implemented yet..."); + return Explanation.match(getBoost(), "not implemented yet..."); } @Override @@ -252,12 +251,16 @@ public class ParentQuery extends Query { if (DocIdSets.isEmpty(childrenDocSet)) { return null; } + final DocIdSetIterator childIterator = childrenDocSet.iterator(); + if (childIterator == null) { + return null; + } SortedDocValues bytesValues = globalIfd.load(context).getOrdinalsValues(parentType); if (bytesValues == null) { return null; } - return new ChildScorer(this, parentIdxs, scores, childrenDocSet.iterator(), bytesValues); + 
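ParentOrdAndScoreCollector above shows the collector side of the same migration: Lucene 5 asks a top-level Collector for a per-segment LeafCollector, so segment state (doc values, scorer) no longer lives in mutable fields on the collector itself. A self-contained sketch of the structure (counting hits instead of collecting scores):

```java
import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Scorer;

// Invented example: per-segment state belongs to the LeafCollector, created fresh
// for every segment, instead of being reset via doSetNextReader() as before.
final class HitCountingCollector implements Collector {
    private int count;

    @Override
    public boolean needsScores() {
        // ParentOrdAndScoreCollector returns true because collect() reads scorer.score().
        return false;
    }

    @Override
    public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException {
        return new LeafCollector() {
            @Override
            public void setScorer(Scorer scorer) throws IOException {
                // unused: this sketch never looks at scores
            }

            @Override
            public void collect(int doc) throws IOException {
                count++;
            }
        };
    }

    int count() {
        return count;
    }
}
```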
return new ChildScorer(this, parentIdxs, scores, childIterator, bytesValues); } } diff --git a/src/main/java/org/elasticsearch/index/search/child/ScoreType.java b/src/main/java/org/elasticsearch/index/search/child/ScoreType.java index e0b84a14274..b9ea6281157 100644 --- a/src/main/java/org/elasticsearch/index/search/child/ScoreType.java +++ b/src/main/java/org/elasticsearch/index/search/child/ScoreType.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.index.search.child; -import org.elasticsearch.ElasticsearchIllegalArgumentException; /** * Defines how scores from child documents are mapped into the parent document. @@ -66,7 +65,7 @@ public enum ScoreType { } else if ("total".equals(type)) { // This name is consistent with: ScoreMode.Total return SUM; } - throw new ElasticsearchIllegalArgumentException("No score type for child query [" + type + "] found"); + throw new IllegalArgumentException("No score type for child query [" + type + "] found"); } } diff --git a/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java b/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java index 7ca36745761..4fc233b21b9 100644 --- a/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java +++ b/src/main/java/org/elasticsearch/index/search/child/TopChildrenQuery.java @@ -25,8 +25,8 @@ import org.apache.lucene.index.*; import org.apache.lucene.search.*; import org.apache.lucene.util.*; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lucene.IndexCacheableQuery; import org.elasticsearch.common.lucene.search.EmptyScorer; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.elasticsearch.index.fielddata.IndexParentChildFieldData; @@ -54,7 +54,7 @@ import java.util.Set; * This query is most of the times faster than the {@link ChildrenQuery}. Usually enough parent documents can be returned * in the first child document query round. */ -public class TopChildrenQuery extends Query { +public class TopChildrenQuery extends IndexCacheableQuery { private static final ParentDocComparator PARENT_DOC_COMP = new ParentDocComparator(); @@ -64,17 +64,13 @@ public class TopChildrenQuery extends Query { private final ScoreType scoreType; private final int factor; private final int incrementalFactor; - private Query originalChildQuery; + private Query childQuery; private final BitDocIdSetFilter nonNestedDocsFilter; - // This field will hold the rewritten form of originalChildQuery, so that we can reuse it - private Query rewrittenChildQuery; - private IndexReader rewriteIndexReader; - // Note, the query is expected to already be filtered to only child type docs public TopChildrenQuery(IndexParentChildFieldData parentChildIndexFieldData, Query childQuery, String childType, String parentType, ScoreType scoreType, int factor, int incrementalFactor, BitDocIdSetFilter nonNestedDocsFilter) { this.parentChildIndexFieldData = parentChildIndexFieldData; - this.originalChildQuery = childQuery; + this.childQuery = childQuery; this.childType = childType; this.parentType = parentType; this.scoreType = scoreType; @@ -83,35 +79,19 @@ public class TopChildrenQuery extends Query { this.nonNestedDocsFilter = nonNestedDocsFilter; } - // Rewrite invocation logic: - // 1) query_then|and_fetch (default): Rewrite is execute as part of the createWeight invocation, when search child docs. 
- // 2) dfs_query_then|and_fetch:: First rewrite and then createWeight is executed. During query phase rewrite isn't - // executed any more because searchContext#queryRewritten() returns true. @Override public Query rewrite(IndexReader reader) throws IOException { - if (rewrittenChildQuery == null) { - rewrittenChildQuery = originalChildQuery.rewrite(reader); - rewriteIndexReader = reader; + Query childRewritten = childQuery.rewrite(reader); + if (childRewritten != childQuery) { + Query rewritten = new TopChildrenQuery(parentChildIndexFieldData, childRewritten, childType, parentType, scoreType, factor, incrementalFactor, nonNestedDocsFilter); + rewritten.setBoost(getBoost()); + return rewritten; } - // We can always return the current instance, and we can do this b/c the child query is executed separately - // before the main query (other scope) in a different IS#search() invocation than the main query. - // In fact we only need override the rewrite method because for the dfs phase, to get also global document - // frequency for the child query. - return this; + return super.rewrite(reader); } @Override - public Query clone() { - TopChildrenQuery q = (TopChildrenQuery) super.clone(); - q.originalChildQuery = originalChildQuery.clone(); - if (q.rewrittenChildQuery != null) { - q.rewrittenChildQuery = rewrittenChildQuery.clone(); - } - return q; - } - - @Override - public Weight createWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException { ObjectObjectOpenHashMap parentDocs = new ObjectObjectOpenHashMap<>(); SearchContext searchContext = SearchContext.current(); @@ -122,16 +102,9 @@ public class TopChildrenQuery extends Query { } int numChildDocs = requestedDocs * factor; - Query childQuery; - if (rewrittenChildQuery == null) { - childQuery = rewrittenChildQuery = searcher.rewrite(originalChildQuery); - } else { - assert rewriteIndexReader == searcher.getIndexReader() : "not equal, rewriteIndexReader=" + rewriteIndexReader + " searcher.getIndexReader()=" + searcher.getIndexReader(); - childQuery = rewrittenChildQuery; - } - IndexSearcher indexSearcher = new IndexSearcher(searcher.getIndexReader()); indexSearcher.setSimilarity(searcher.getSimilarity()); + indexSearcher.setQueryCache(null); while (true) { parentDocs.clear(); TopDocs topChildDocs = indexSearcher.search(childQuery, numChildDocs); @@ -156,7 +129,7 @@ public class TopChildrenQuery extends Query { } } - ParentWeight parentWeight = new ParentWeight(this, rewrittenChildQuery.createWeight(searcher, needsScores), parentDocs); + ParentWeight parentWeight = new ParentWeight(this, childQuery.createWeight(searcher, needsScores), parentDocs); searchContext.addReleasable(parentWeight, Lifetime.COLLECTION); return parentWeight; } @@ -252,12 +225,12 @@ public class TopChildrenQuery extends Query { if (this == obj) { return true; } - if (obj == null || obj.getClass() != this.getClass()) { + if (super.equals(obj) == false) { return false; } TopChildrenQuery that = (TopChildrenQuery) obj; - if (!originalChildQuery.equals(that.originalChildQuery)) { + if (!childQuery.equals(that.childQuery)) { return false; } if (!childType.equals(that.childType)) { @@ -266,25 +239,22 @@ public class TopChildrenQuery extends Query { if (incrementalFactor != that.incrementalFactor) { return false; } - if (getBoost() != that.getBoost()) { - return false; - } return true; } @Override public int hashCode() { - int result = originalChildQuery.hashCode(); + 
int result = super.hashCode(); + result = 31 * result + childQuery.hashCode(); result = 31 * result + parentType.hashCode(); result = 31 * result + incrementalFactor; - result = 31 * result + Float.floatToIntBits(getBoost()); return result; } @Override public String toString(String field) { StringBuilder sb = new StringBuilder(); - sb.append("score_child[").append(childType).append("/").append(parentType).append("](").append(originalChildQuery.toString(field)).append(')'); + sb.append("score_child[").append(childType).append("/").append(parentType).append("](").append(childQuery.toString(field)).append(')'); sb.append(ToStringUtils.boost(getBoost())); return sb.toString(); } @@ -317,7 +287,7 @@ public class TopChildrenQuery extends Query { } @Override - public void close() throws ElasticsearchException { + public void close() { } @Override @@ -361,14 +331,14 @@ public class TopChildrenQuery extends Query { }; } - throw new ElasticsearchIllegalStateException("No support for score type [" + scoreType + "]"); + throw new IllegalStateException("No support for score type [" + scoreType + "]"); } return new EmptyScorer(this); } @Override public Explanation explain(LeafReaderContext context, int doc) throws IOException { - return new Explanation(getBoost(), "not implemented yet..."); + return Explanation.match(getBoost(), "not implemented yet..."); } } diff --git a/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceFilter.java b/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceFilter.java index 014a69fed12..f5cbe4f3423 100644 --- a/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceFilter.java +++ b/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceFilter.java @@ -26,7 +26,6 @@ import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocValuesDocIdSet; import org.apache.lucene.search.Filter; import org.apache.lucene.util.Bits; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; @@ -73,7 +72,7 @@ public class GeoDistanceFilter extends Filter { boundingBoxFilter = IndexedGeoBoundingBoxFilter.create(distanceBoundingCheck.topLeft(), distanceBoundingCheck.bottomRight(), mapper); distanceBoundingCheck = GeoDistance.ALWAYS_INSTANCE; // fine, we do the bounding box check using the filter } else { - throw new ElasticsearchIllegalArgumentException("type [" + optimizeBbox + "] for bounding box optimization not supported"); + throw new IllegalArgumentException("type [" + optimizeBbox + "] for bounding box optimization not supported"); } } else { distanceBoundingCheck = GeoDistance.ALWAYS_INSTANCE; @@ -123,7 +122,7 @@ public class GeoDistanceFilter extends Filter { @Override public boolean equals(Object o) { if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (super.equals(o) == false) return false; GeoDistanceFilter filter = (GeoDistanceFilter) o; @@ -144,10 +143,10 @@ public class GeoDistanceFilter extends Filter { @Override public int hashCode() { - int result; + int result = super.hashCode(); long temp; temp = lat != +0.0d ? Double.doubleToLongBits(lat) : 0L; - result = (int) (temp ^ (temp >>> 32)); + result = 31 * result + (int) (temp ^ (temp >>> 32)); temp = lon != +0.0d ? Double.doubleToLongBits(lon) : 0L; result = 31 * result + (int) (temp ^ (temp >>> 32)); temp = distance != +0.0d ? 
Double.doubleToLongBits(distance) : 0L; diff --git a/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeFilter.java b/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeFilter.java index a48760657d3..93e688427bb 100644 --- a/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeFilter.java +++ b/src/main/java/org/elasticsearch/index/search/geo/GeoDistanceRangeFilter.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.DocValuesDocIdSet; import org.apache.lucene.search.Filter; import org.apache.lucene.util.Bits; import org.apache.lucene.util.NumericUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.geo.GeoDistance; import org.elasticsearch.common.geo.GeoPoint; @@ -92,7 +91,7 @@ public class GeoDistanceRangeFilter extends Filter { boundingBoxFilter = IndexedGeoBoundingBoxFilter.create(distanceBoundingCheck.topLeft(), distanceBoundingCheck.bottomRight(), mapper); distanceBoundingCheck = GeoDistance.ALWAYS_INSTANCE; // fine, we do the bounding box check using the filter } else { - throw new ElasticsearchIllegalArgumentException("type [" + optimizeBbox + "] for bounding box optimization not supported"); + throw new IllegalArgumentException("type [" + optimizeBbox + "] for bounding box optimization not supported"); } } else { distanceBoundingCheck = GeoDistance.ALWAYS_INSTANCE; @@ -133,7 +132,7 @@ public class GeoDistanceRangeFilter extends Filter { @Override public boolean equals(Object o) { if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (super.equals(o) == false) return false; GeoDistanceRangeFilter filter = (GeoDistanceRangeFilter) o; @@ -155,10 +154,10 @@ public class GeoDistanceRangeFilter extends Filter { @Override public int hashCode() { - int result; + int result = super.hashCode(); long temp; temp = lat != +0.0d ? Double.doubleToLongBits(lat) : 0L; - result = (int) (temp ^ (temp >>> 32)); + result = 31 * result + (int) (temp ^ (temp >>> 32)); temp = lon != +0.0d ? Double.doubleToLongBits(lon) : 0L; result = 31 * result + (int) (temp ^ (temp >>> 32)); temp = inclusiveLowerPoint != +0.0d ? 
Double.doubleToLongBits(inclusiveLowerPoint) : 0L; diff --git a/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxFilter.java b/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxFilter.java index 2a2b99a5b0c..8b769e42849 100644 --- a/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxFilter.java +++ b/src/main/java/org/elasticsearch/index/search/geo/InMemoryGeoBoundingBoxFilter.java @@ -76,6 +76,26 @@ public class InMemoryGeoBoundingBoxFilter extends Filter { return "GeoBoundingBoxFilter(" + indexFieldData.getFieldNames().indexName() + ", " + topLeft + ", " + bottomRight + ")"; } + @Override + public boolean equals(Object obj) { + if (super.equals(obj) == false) { + return false; + } + InMemoryGeoBoundingBoxFilter other = (InMemoryGeoBoundingBoxFilter) obj; + return fieldName().equalsIgnoreCase(other.fieldName()) + && topLeft.equals(other.topLeft) + && bottomRight.equals(other.bottomRight); + } + + @Override + public int hashCode() { + int h = super.hashCode(); + h = 31 * h + fieldName().hashCode(); + h = 31 * h + topLeft.hashCode(); + h = 31 * h + bottomRight.hashCode(); + return h; + } + public static class Meridian180GeoBoundingBoxDocSet extends DocValuesDocIdSet { private final MultiGeoPointValues values; private final GeoPoint topLeft; diff --git a/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java b/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java index 6c57c251771..880b1f54254 100644 --- a/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java +++ b/src/main/java/org/elasticsearch/index/search/geo/IndexedGeoBoundingBoxFilter.java @@ -22,9 +22,8 @@ package org.elasticsearch.index.search.geo; import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Filter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.apache.lucene.search.QueryWrapperFilter; import org.elasticsearch.common.geo.GeoPoint; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper; /** @@ -33,7 +32,7 @@ public class IndexedGeoBoundingBoxFilter { public static Filter create(GeoPoint topLeft, GeoPoint bottomRight, GeoPointFieldMapper fieldMapper) { if (!fieldMapper.isEnableLatLon()) { - throw new ElasticsearchIllegalArgumentException("lat/lon is not enabled (indexed) for field [" + fieldMapper.name() + "], can't use indexed filter on it"); + throw new IllegalArgumentException("lat/lon is not enabled (indexed) for field [" + fieldMapper.name() + "], can't use indexed filter on it"); } //checks to see if bounding box crosses 180 degrees if (topLeft.lon() > bottomRight.lon()) { @@ -49,13 +48,13 @@ public class IndexedGeoBoundingBoxFilter { filter.add(fieldMapper.lonMapper().rangeFilter(null, bottomRight.lon(), true, true), Occur.SHOULD); filter.add(fieldMapper.lonMapper().rangeFilter(topLeft.lon(), null, true, true), Occur.SHOULD); filter.add(fieldMapper.latMapper().rangeFilter(bottomRight.lat(), topLeft.lat(), true, true), Occur.MUST); - return Queries.wrap(filter); + return new QueryWrapperFilter(filter); } private static Filter eastGeoBoundingBoxFilter(GeoPoint topLeft, GeoPoint bottomRight, GeoPointFieldMapper fieldMapper) { BooleanQuery filter = new BooleanQuery(); filter.add(fieldMapper.lonMapper().rangeFilter(topLeft.lon(), bottomRight.lon(), true, true), Occur.MUST); 
filter.add(fieldMapper.latMapper().rangeFilter(bottomRight.lat(), topLeft.lat(), true, true), Occur.MUST); - return Queries.wrap(filter); + return new QueryWrapperFilter(filter); } } diff --git a/src/main/java/org/elasticsearch/index/search/nested/NonNestedDocsFilter.java b/src/main/java/org/elasticsearch/index/search/nested/NonNestedDocsFilter.java deleted file mode 100644 index 12f35f26b25..00000000000 --- a/src/main/java/org/elasticsearch/index/search/nested/NonNestedDocsFilter.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.search.nested; - -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.Term; -import org.apache.lucene.search.DocIdSet; -import org.apache.lucene.search.Filter; -import org.apache.lucene.search.PrefixQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryWrapperFilter; -import org.apache.lucene.util.Bits; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.lucene.search.Queries; -import org.elasticsearch.index.mapper.internal.TypeFieldMapper; - -import java.io.IOException; - -/** - * A filter that returns all root (non nested) documents. - * - * Root documents have an unique id, a type and optionally have a _source and other indexed and stored fields. - * A nested document is a sub documents that belong to a root document. - * Nested documents share the unique id and type and optionally the _source with root documents. - */ -public final class NonNestedDocsFilter extends Filter { - - public static final NonNestedDocsFilter INSTANCE = new NonNestedDocsFilter(); - - private final Filter filter = Queries.wrap(Queries.not(nestedFilter())); - private final int hashCode = filter.hashCode(); - - private NonNestedDocsFilter() { - } - - @Override - public Query clone() { - return INSTANCE; - } - - @Override - public DocIdSet getDocIdSet(LeafReaderContext context, Bits acceptDocs) throws IOException { - return filter.getDocIdSet(context, acceptDocs); - } - - @Override - public int hashCode() { - return hashCode; - } - - @Override - public boolean equals(Object obj) { - return obj == INSTANCE; - } - - @Override - public String toString(String field) { - return "NonNestedDocsFilter"; - } - - /** - * @return a filter that returns all nested documents. 
- */ - private static Filter nestedFilter() { - return Queries.wrap(new PrefixQuery(new Term(TypeFieldMapper.NAME, new BytesRef("__")))); - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/search/shape/ShapeFetchService.java b/src/main/java/org/elasticsearch/index/search/shape/ShapeFetchService.java index bf8aa8eb4b3..dbe42131d1e 100644 --- a/src/main/java/org/elasticsearch/index/search/shape/ShapeFetchService.java +++ b/src/main/java/org/elasticsearch/index/search/shape/ShapeFetchService.java @@ -19,8 +19,6 @@ package org.elasticsearch.index.search.shape; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.client.Client; @@ -60,7 +58,7 @@ public class ShapeFetchService extends AbstractComponent { public ShapeBuilder fetch(String id, String type, String index, String path) throws IOException { GetResponse response = client.get(new GetRequest(index, type, id).preference("_local").operationThreaded(false)).actionGet(); if (!response.isExists()) { - throw new ElasticsearchIllegalArgumentException("Shape with ID [" + id + "] in type [" + type + "] not found"); + throw new IllegalArgumentException("Shape with ID [" + id + "] in type [" + type + "] not found"); } String[] pathElements = Strings.splitStringToArray(path, '.'); @@ -83,7 +81,7 @@ public class ShapeFetchService extends AbstractComponent { } } } - throw new ElasticsearchIllegalStateException("Shape with name [" + id + "] found but missing " + path + " field"); + throw new IllegalStateException("Shape with name [" + id + "] found but missing " + path + " field"); } finally { if (parser != null) { parser.close(); diff --git a/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java b/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java index 91f8ac4f3bf..3fa975a31ed 100644 --- a/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java +++ b/src/main/java/org/elasticsearch/index/settings/IndexDynamicSettingsModule.java @@ -36,7 +36,7 @@ import org.elasticsearch.index.merge.policy.TieredMergePolicyProvider; import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider; import org.elasticsearch.index.search.slowlog.ShardSlowLogSearchService; import org.elasticsearch.index.shard.IndexShard; -import org.elasticsearch.index.store.support.AbstractIndexStore; +import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.translog.TranslogService; import org.elasticsearch.index.translog.fs.FsTranslog; import org.elasticsearch.indices.IndicesWarmer; @@ -51,8 +51,8 @@ public class IndexDynamicSettingsModule extends AbstractModule { public IndexDynamicSettingsModule() { indexDynamicSettings = new DynamicSettings(); - indexDynamicSettings.addDynamicSetting(AbstractIndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE); - indexDynamicSettings.addDynamicSetting(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE); + indexDynamicSettings.addDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, Validator.BYTES_SIZE); + indexDynamicSettings.addDynamicSetting(IndexStore.INDEX_STORE_THROTTLE_TYPE); indexDynamicSettings.addDynamicSetting(ConcurrentMergeSchedulerProvider.MAX_THREAD_COUNT); indexDynamicSettings.addDynamicSetting(ConcurrentMergeSchedulerProvider.MAX_MERGE_COUNT); 
indexDynamicSettings.addDynamicSetting(ConcurrentMergeSchedulerProvider.AUTO_THROTTLE); diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/src/main/java/org/elasticsearch/index/shard/IndexShard.java index c04ec70747d..a59d938d5a9 100644 --- a/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -20,6 +20,8 @@ package org.elasticsearch.index.shard; import com.google.common.base.Charsets; + +import com.google.common.base.Preconditions; import org.apache.lucene.codecs.PostingsFormat; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.search.Filter; @@ -30,8 +32,6 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.ThreadInterruptedException; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.optimize.OptimizeRequest; @@ -43,13 +43,13 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Preconditions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; @@ -69,7 +69,14 @@ import org.elasticsearch.index.cache.query.ShardQueryCache; import org.elasticsearch.index.codec.CodecService; import org.elasticsearch.index.deletionpolicy.SnapshotDeletionPolicy; import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit; -import org.elasticsearch.index.engine.*; +import org.elasticsearch.index.engine.CommitStats; +import org.elasticsearch.index.engine.Engine; +import org.elasticsearch.index.engine.EngineClosedException; +import org.elasticsearch.index.engine.EngineConfig; +import org.elasticsearch.index.engine.EngineException; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.engine.RefreshFailedEngineException; +import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.fielddata.IndexFieldDataService; import org.elasticsearch.index.fielddata.ShardFieldData; @@ -78,7 +85,12 @@ import org.elasticsearch.index.get.GetStats; import org.elasticsearch.index.get.ShardGetService; import org.elasticsearch.index.indexing.IndexingStats; import org.elasticsearch.index.indexing.ShardIndexingService; -import org.elasticsearch.index.mapper.*; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.MapperAnalyzer; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.Mapping; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SourceToParse; import 
org.elasticsearch.index.mapper.internal.ParentFieldMapper; import org.elasticsearch.index.merge.MergeStats; import org.elasticsearch.index.merge.policy.MergePolicyProvider; @@ -88,7 +100,6 @@ import org.elasticsearch.index.percolator.stats.ShardPercolateService; import org.elasticsearch.index.query.IndexQueryParserService; import org.elasticsearch.index.recovery.RecoveryStats; import org.elasticsearch.index.refresh.RefreshStats; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.index.search.stats.SearchStats; import org.elasticsearch.index.search.stats.ShardSearchService; import org.elasticsearch.index.settings.IndexSettingsService; @@ -327,7 +338,7 @@ public class IndexShard extends AbstractIndexShardComponent { public void updateRoutingEntry(final ShardRouting newRouting, final boolean persistState) { final ShardRouting currentRouting = this.shardRouting; if (!newRouting.shardId().equals(shardId())) { - throw new ElasticsearchIllegalArgumentException("Trying to set a routing entry with shardId [" + newRouting.shardId() + "] on a shard with shardId [" + shardId() + "]"); + throw new IllegalArgumentException("Trying to set a routing entry with shardId [" + newRouting.shardId() + "] on a shard with shardId [" + shardId() + "]"); } try { if (currentRouting != null) { @@ -444,11 +455,11 @@ public class IndexShard extends AbstractIndexShardComponent { return previousState; } - public Engine.Create prepareCreate(SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates, boolean autoGeneratedId) throws ElasticsearchException { + public Engine.Create prepareCreate(SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates, boolean autoGeneratedId) { return prepareCreate(docMapper(source.type()), source, version, versionType, origin, state != IndexShardState.STARTED || canHaveDuplicates, autoGeneratedId); } - static Engine.Create prepareCreate(Tuple docMapper, SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates, boolean autoGeneratedId) throws ElasticsearchException { + static Engine.Create prepareCreate(Tuple docMapper, SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates, boolean autoGeneratedId) { long startTime = System.nanoTime(); ParsedDocument doc = docMapper.v1().parse(source); if (docMapper.v2() != null) { @@ -457,7 +468,7 @@ public class IndexShard extends AbstractIndexShardComponent { return new Engine.Create(docMapper.v1(), docMapper.v1().uidMapper().term(doc.uid().stringValue()), doc, version, versionType, origin, startTime, canHaveDuplicates, autoGeneratedId); } - public ParsedDocument create(Engine.Create create) throws ElasticsearchException { + public void create(Engine.Create create) { writeAllowed(create.origin()); create = indexingService.preCreate(create); mapperAnalyzer.setType(create.type()); @@ -472,14 +483,13 @@ public class IndexShard extends AbstractIndexShardComponent { throw ex; } indexingService.postCreate(create); - return create.parsedDoc(); } - public Engine.Index prepareIndex(SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates) throws ElasticsearchException { + public Engine.Index prepareIndex(SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean 
canHaveDuplicates) { return prepareIndex(docMapper(source.type()), source, version, versionType, origin, state != IndexShardState.STARTED || canHaveDuplicates); } - static Engine.Index prepareIndex(Tuple docMapper, SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates) throws ElasticsearchException { + static Engine.Index prepareIndex(Tuple docMapper, SourceToParse source, long version, VersionType versionType, Engine.Operation.Origin origin, boolean canHaveDuplicates) { long startTime = System.nanoTime(); ParsedDocument doc = docMapper.v1().parse(source); if (docMapper.v2() != null) { @@ -488,31 +498,36 @@ public class IndexShard extends AbstractIndexShardComponent { return new Engine.Index(docMapper.v1(), docMapper.v1().uidMapper().term(doc.uid().stringValue()), doc, version, versionType, origin, startTime, canHaveDuplicates); } - public ParsedDocument index(Engine.Index index) throws ElasticsearchException { + /** + * Index a document and return whether it was created, as opposed to just + * updated. + */ + public boolean index(Engine.Index index) { writeAllowed(index.origin()); index = indexingService.preIndex(index); mapperAnalyzer.setType(index.type()); + final boolean created; try { if (logger.isTraceEnabled()) { logger.trace("index [{}][{}]{}", index.type(), index.id(), index.docs()); } - engine().index(index); + created = engine().index(index); index.endTime(System.nanoTime()); } catch (Throwable ex) { indexingService.postIndex(index, ex); throw ex; } indexingService.postIndex(index); - return index.parsedDoc(); + return created; } - public Engine.Delete prepareDelete(String type, String id, long version, VersionType versionType, Engine.Operation.Origin origin) throws ElasticsearchException { + public Engine.Delete prepareDelete(String type, String id, long version, VersionType versionType, Engine.Operation.Origin origin) { long startTime = System.nanoTime(); final DocumentMapper documentMapper = docMapper(type).v1(); return new Engine.Delete(type, id, documentMapper.uidMapper().term(type, id), version, versionType, origin, startTime, false); } - public void delete(Engine.Delete delete) throws ElasticsearchException { + public void delete(Engine.Delete delete) { writeAllowed(delete.origin()); delete = indexingService.preDelete(delete); try { @@ -528,11 +543,11 @@ public class IndexShard extends AbstractIndexShardComponent { indexingService.postDelete(delete); } - public Engine.DeleteByQuery prepareDeleteByQuery(BytesReference source, @Nullable String[] filteringAliases, Engine.Operation.Origin origin, String... types) throws ElasticsearchException { + public Engine.DeleteByQuery prepareDeleteByQuery(BytesReference source, @Nullable String[] filteringAliases, Engine.Operation.Origin origin, String... types) { return prepareDeleteByQuery(queryParserService, mapperService, indexAliasesService, indexCache, source, filteringAliases, origin, types); } - static Engine.DeleteByQuery prepareDeleteByQuery(IndexQueryParserService queryParserService, MapperService mapperService, IndexAliasesService indexAliasesService, IndexCache indexCache, BytesReference source, @Nullable String[] filteringAliases, Engine.Operation.Origin origin, String... 
types) throws ElasticsearchException { + static Engine.DeleteByQuery prepareDeleteByQuery(IndexQueryParserService queryParserService, MapperService mapperService, IndexAliasesService indexAliasesService, IndexCache indexCache, BytesReference source, @Nullable String[] filteringAliases, Engine.Operation.Origin origin, String... types) { long startTime = System.nanoTime(); if (types == null) { types = Strings.EMPTY_ARRAY; @@ -540,31 +555,20 @@ public class IndexShard extends AbstractIndexShardComponent { Query query = queryParserService.parseQuery(source).query(); Filter searchFilter = mapperService.searchFilter(types); if (searchFilter != null) { - query = new FilteredQuery(query, indexCache.filter().cache(searchFilter, null, queryParserService.autoFilterCachePolicy())); + query = new FilteredQuery(query, searchFilter); } Filter aliasFilter = indexAliasesService.aliasFilter(filteringAliases); - BitDocIdSetFilter parentFilter = mapperService.hasNested() ? indexCache.bitsetFilterCache().getBitDocIdSetFilter(NonNestedDocsFilter.INSTANCE) : null; + BitDocIdSetFilter parentFilter = mapperService.hasNested() ? indexCache.bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter()) : null; return new Engine.DeleteByQuery(query, source, filteringAliases, aliasFilter, parentFilter, origin, startTime, types); } - public void deleteByQuery(Engine.DeleteByQuery deleteByQuery) throws ElasticsearchException { - writeAllowed(deleteByQuery.origin()); - if (logger.isTraceEnabled()) { - logger.trace("delete_by_query [{}]", deleteByQuery.query()); - } - deleteByQuery = indexingService.preDeleteByQuery(deleteByQuery); - engine().delete(deleteByQuery); - deleteByQuery.endTime(System.nanoTime()); - indexingService.postDeleteByQuery(deleteByQuery); - } - - public Engine.GetResult get(Engine.Get get) throws ElasticsearchException { + public Engine.GetResult get(Engine.Get get) { readAllowed(); return engine().get(get); } - public void refresh(String source) throws ElasticsearchException { + public void refresh(String source) { verifyNotClosed(); if (logger.isTraceEnabled()) { logger.trace("refresh with source: {}", source); @@ -704,7 +708,7 @@ public class IndexShard extends AbstractIndexShardComponent { } - public void optimize(OptimizeRequest optimize) throws ElasticsearchException { + public void optimize(OptimizeRequest optimize) { verifyStarted(); if (logger.isTraceEnabled()) { logger.trace("optimize with {}", optimize); @@ -760,7 +764,7 @@ public class IndexShard extends AbstractIndexShardComponent { engine.flushAndClose(); } } finally { // playing safe here and close the engine even if the above succeeds - close can be called multiple times - IOUtils.close(engine); + IOUtils.close(engine, shardFilterCache); } } } @@ -785,7 +789,7 @@ public class IndexShard extends AbstractIndexShardComponent { } /** called before starting to copy index files over */ - public void prepareForIndexRecovery() throws ElasticsearchException { + public void prepareForIndexRecovery() { if (state != IndexShardState.RECOVERING) { throw new IndexShardNotRecoveringException(shardId, state); } @@ -808,14 +812,14 @@ public class IndexShard extends AbstractIndexShardComponent { /** * After the store has been recovered, we need to start the engine in order to apply operations */ - public Map performTranslogRecovery() throws ElasticsearchException { + public Map performTranslogRecovery() { final Map recoveredTypes = internalPerformTranslogRecovery(false); assert recoveryState.getStage() == RecoveryState.Stage.TRANSLOG : "TRANSLOG 
stage expected but was: " + recoveryState.getStage(); return recoveredTypes; } - private Map internalPerformTranslogRecovery(boolean skipTranslogRecovery) throws ElasticsearchException { + private Map internalPerformTranslogRecovery(boolean skipTranslogRecovery) { if (state != IndexShardState.RECOVERING) { throw new IndexShardNotRecoveringException(shardId, state); } @@ -838,7 +842,7 @@ public class IndexShard extends AbstractIndexShardComponent { * the replay of the transaction log which is required in cases where we restore a previous index or recover from * a remote peer. */ - public void skipTranslogRecovery() throws ElasticsearchException { + public void skipTranslogRecovery() { assert engineUnsafe() == null : "engine was already created"; Map recoveredTypes = internalPerformTranslogRecovery(true); assert recoveredTypes.isEmpty(); @@ -1002,7 +1006,7 @@ public class IndexShard extends AbstractIndexShardComponent { */ public void deleteShardState() throws IOException { if (this.routingEntry() != null && this.routingEntry().active()) { - throw new ElasticsearchIllegalStateException("Can't delete shard state on an active shard"); + throw new IllegalStateException("Can't delete shard state on an active shard"); } MetaDataStateFormat.deleteMetaState(shardPath().getDataPath()); } @@ -1298,6 +1302,6 @@ public class IndexShard extends AbstractIndexShardComponent { }; return new EngineConfig(shardId, threadPool, indexingService, indexSettingsService, warmer, store, deletionPolicy, translog, mergePolicyProvider, mergeScheduler, - mapperAnalyzer, similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer); + mapperAnalyzer, similarityService.similarity(), codecService, failedEngineListener, translogRecoveryPerformer, indexCache.filter(), indexCache.filterPolicy()); } } diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardException.java b/src/main/java/org/elasticsearch/index/shard/IndexShardException.java index e9217fda4cb..019b4d13bb4 100644 --- a/src/main/java/org/elasticsearch/index/shard/IndexShardException.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShardException.java @@ -19,8 +19,11 @@ package org.elasticsearch.index.shard; +import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.IndexException; +import java.io.IOException; + /** * */ @@ -33,11 +36,24 @@ public class IndexShardException extends IndexException { } public IndexShardException(ShardId shardId, String msg, Throwable cause) { - super(shardId == null ? null : shardId.index(), false, "[" + (shardId == null ? "_na" : shardId.id()) + "] " + msg, cause); + super(shardId == null ? null : shardId.index(), msg, cause); this.shardId = shardId; } public ShardId shardId() { return shardId; } + + @Override + public String toString() { + return (shardId == null ? 
"_na" : shardId) + getMessage(); + } + + @Override + protected void innerToXContent(XContentBuilder builder, Params params) throws IOException { + if (shardId != null) { + builder.field("shard", shardId.getId()); + } + super.innerToXContent(builder, params); + } } diff --git a/src/main/java/org/elasticsearch/index/shard/IndexShardState.java b/src/main/java/org/elasticsearch/index/shard/IndexShardState.java index 1b4edcb10ba..fa4122cabba 100644 --- a/src/main/java/org/elasticsearch/index/shard/IndexShardState.java +++ b/src/main/java/org/elasticsearch/index/shard/IndexShardState.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.shard; -import org.elasticsearch.ElasticsearchIllegalArgumentException; /** * @@ -51,9 +50,9 @@ public enum IndexShardState { return this.id; } - public static IndexShardState fromId(byte id) throws ElasticsearchIllegalArgumentException { + public static IndexShardState fromId(byte id) { if (id < 0 || id >= IDS.length) { - throw new ElasticsearchIllegalArgumentException("No mapping for id [" + id + "]"); + throw new IllegalArgumentException("No mapping for id [" + id + "]"); } return IDS[id]; } diff --git a/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java b/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java index a68d1289ff1..ba7dcdd3976 100644 --- a/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java +++ b/src/main/java/org/elasticsearch/index/shard/ShadowIndexShard.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.index.shard; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.Nullable; @@ -98,7 +97,7 @@ public final class ShadowIndexShard extends IndexShard { @Override public void updateRoutingEntry(ShardRouting newRouting, boolean persistState) { if (newRouting.primary() == true) {// becoming a primary - throw new ElasticsearchIllegalStateException("can't promote shard to primary"); + throw new IllegalStateException("can't promote shard to primary"); } super.updateRoutingEntry(newRouting, persistState); } diff --git a/src/main/java/org/elasticsearch/index/shard/ShardPath.java b/src/main/java/org/elasticsearch/index/shard/ShardPath.java index f519aa546aa..870283017c8 100644 --- a/src/main/java/org/elasticsearch/index/shard/ShardPath.java +++ b/src/main/java/org/elasticsearch/index/shard/ShardPath.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.index.shard; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.ESLogger; @@ -91,12 +90,12 @@ public final class ShardPath { ShardStateMetaData load = ShardStateMetaData.FORMAT.loadLatestState(logger, path); if (load != null) { if ((load.indexUUID.equals(indexUUID) || IndexMetaData.INDEX_UUID_NA_VALUE.equals(load.indexUUID)) == false) { - throw new ElasticsearchIllegalStateException(shardId + " index UUID in shard state was: " + load.indexUUID + " excepted: " + indexUUID + " on shard path: " + path); + throw new IllegalStateException(shardId + " index UUID in shard state was: " + load.indexUUID + " excepted: " + indexUUID + " on shard path: " + path); } if (loadedPath == null) { loadedPath = path; } else{ - throw new ElasticsearchIllegalStateException(shardId + " more than one shard state found"); + throw new IllegalStateException(shardId + " more than one shard state 
found"); } } diff --git a/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java b/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java index cf933e8cf0c..b2dbc1022cf 100644 --- a/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java +++ b/src/main/java/org/elasticsearch/index/shard/TranslogRecoveryPerformer.java @@ -19,14 +19,12 @@ package org.elasticsearch.index.shard; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.index.aliases.IndexAliasesService; import org.elasticsearch.index.cache.IndexCache; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.IgnoreOnRecoveryEngineException; import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperAnalyzer; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.MapperUtils; @@ -90,7 +88,7 @@ public class TranslogRecoveryPerformer { * Performs a single recovery operation, and returns the indexing operation (or null if its not an indexing operation) * that can then be used for mapping updates (for example) if needed. */ - public void performRecoveryOperation(Engine engine, Translog.Operation operation) throws ElasticsearchException { + public void performRecoveryOperation(Engine engine, Translog.Operation operation) { try { switch (operation.opType()) { case CREATE: @@ -128,7 +126,7 @@ public class TranslogRecoveryPerformer { deleteByQuery.source(), deleteByQuery.filteringAliases(), Engine.Operation.Origin.RECOVERY, deleteByQuery.types())); break; default: - throw new ElasticsearchIllegalStateException("No operation defined for [" + operation + "]"); + throw new IllegalStateException("No operation defined for [" + operation + "]"); } } catch (ElasticsearchException e) { boolean hasIgnoreOnRecoveryException = false; diff --git a/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java b/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java index 43f6091a70d..566016b3da1 100644 --- a/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java +++ b/src/main/java/org/elasticsearch/index/similarity/AbstractSimilarityProvider.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.similarity; import org.apache.lucene.search.similarities.*; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.settings.Settings; /** @@ -73,7 +72,7 @@ public abstract class AbstractSimilarityProvider implements SimilarityProvider { float z = settings.getAsFloat("normalization.z.z", 0.30f); return new NormalizationZ(z); } else { - throw new ElasticsearchIllegalArgumentException("Unsupported Normalization [" + normalization + "]"); + throw new IllegalArgumentException("Unsupported Normalization [" + normalization + "]"); } } } diff --git a/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java b/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java index c6c6a48a77b..6d30e81c091 100644 --- a/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java +++ b/src/main/java/org/elasticsearch/index/similarity/DFRSimilarityProvider.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.similarity; import com.google.common.collect.ImmutableMap; import 
org.apache.lucene.search.similarities.*; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; @@ -82,7 +81,7 @@ public class DFRSimilarityProvider extends AbstractSimilarityProvider { String basicModel = settings.get("basic_model"); BasicModel model = MODEL_CACHE.get(basicModel); if (model == null) { - throw new ElasticsearchIllegalArgumentException("Unsupported BasicModel [" + basicModel + "]"); + throw new IllegalArgumentException("Unsupported BasicModel [" + basicModel + "]"); } return model; } @@ -97,7 +96,7 @@ public class DFRSimilarityProvider extends AbstractSimilarityProvider { String afterEffect = settings.get("after_effect"); AfterEffect effect = EFFECT_CACHE.get(afterEffect); if (effect == null) { - throw new ElasticsearchIllegalArgumentException("Unsupported AfterEffect [" + afterEffect + "]"); + throw new IllegalArgumentException("Unsupported AfterEffect [" + afterEffect + "]"); } return effect; } diff --git a/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java b/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java index c5e9d2901e5..4741247080c 100644 --- a/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java +++ b/src/main/java/org/elasticsearch/index/similarity/IBSimilarityProvider.java @@ -21,7 +21,6 @@ package org.elasticsearch.index.similarity; import com.google.common.collect.ImmutableMap; import org.apache.lucene.search.similarities.*; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.assistedinject.Assisted; @@ -76,7 +75,7 @@ public class IBSimilarityProvider extends AbstractSimilarityProvider { String rawDistribution = settings.get("distribution"); Distribution distribution = DISTRIBUTION_CACHE.get(rawDistribution); if (distribution == null) { - throw new ElasticsearchIllegalArgumentException("Unsupported Distribution [" + rawDistribution + "]"); + throw new IllegalArgumentException("Unsupported Distribution [" + rawDistribution + "]"); } return distribution; } @@ -91,7 +90,7 @@ public class IBSimilarityProvider extends AbstractSimilarityProvider { String rawLambda = settings.get("lambda"); Lambda lambda = LAMBDA_CACHE.get(rawLambda); if (lambda == null) { - throw new ElasticsearchIllegalArgumentException("Unsupported Lambda [" + rawLambda + "]"); + throw new IllegalArgumentException("Unsupported Lambda [" + rawLambda + "]"); } return lambda; } diff --git a/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java b/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java index cbd7729e105..e2bc92adb3f 100644 --- a/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java +++ b/src/main/java/org/elasticsearch/index/similarity/SimilarityModule.java @@ -20,7 +20,6 @@ package org.elasticsearch.index.similarity; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Scopes; import org.elasticsearch.common.inject.assistedinject.FactoryProvider; @@ -71,7 +70,7 @@ public class SimilarityModule extends AbstractModule { Class type = settings.getAsClass("type", null, "org.elasticsearch.index.similarity.", 
"SimilarityProvider"); if (type == null) { - throw new ElasticsearchIllegalArgumentException("SimilarityProvider [" + name + "] must have an associated type"); + throw new IllegalArgumentException("SimilarityProvider [" + name + "] must have an associated type"); } providers.put(name, type); } diff --git a/src/main/java/org/elasticsearch/index/store/fs/FsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java similarity index 52% rename from src/main/java/org/elasticsearch/index/store/fs/FsDirectoryService.java rename to src/main/java/org/elasticsearch/index/store/FsDirectoryService.java index dab9346413e..f67bc340125 100644 --- a/src/main/java/org/elasticsearch/index/store/fs/FsDirectoryService.java +++ b/src/main/java/org/elasticsearch/index/store/FsDirectoryService.java @@ -17,13 +17,18 @@ * under the License. */ -package org.elasticsearch.index.store.fs; +package org.elasticsearch.index.store; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Collections; +import java.util.Set; +import com.google.common.collect.Sets; import org.apache.lucene.store.*; +import org.apache.lucene.util.Constants; +import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.metrics.CounterMetric; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.settings.IndexSettings; @@ -31,19 +36,21 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardPath; import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.index.store.IndexStoreModule; import org.elasticsearch.index.store.StoreException; /** */ -public abstract class FsDirectoryService extends DirectoryService implements StoreRateLimiting.Listener, StoreRateLimiting.Provider { +public class FsDirectoryService extends DirectoryService implements StoreRateLimiting.Listener, StoreRateLimiting.Provider { protected final IndexStore indexStore; private final CounterMetric rateLimitingTimeInNanos = new CounterMetric(); private final ShardPath path; - public FsDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore, ShardPath path) { - super(shardId, indexSettings); + @Inject + public FsDirectoryService(@IndexSettings Settings indexSettings, IndexStore indexStore, ShardPath path) { + super(path.getShardId(), indexSettings); this.path = path; this.indexStore = indexStore; } @@ -79,10 +86,45 @@ public abstract class FsDirectoryService extends DirectoryService implements Sto return new RateLimitedFSDirectory(wrapped, this, this) ; } - protected abstract Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException; @Override public void onPause(long nanos) { rateLimitingTimeInNanos.inc(nanos); } + + /* + * We are mmapping docvalues as well as term dictionaries, all other files are served through NIOFS + * this provides good random access performance while not creating unnecessary mmaps for files like stored + * fields etc. 
+ */ + private static final Set PRIMARY_EXTENSIONS = Collections.unmodifiableSet(Sets.newHashSet("dvd", "tim")); + + + protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { + final String storeType = indexSettings.get(IndexStoreModule.STORE_TYPE, IndexStoreModule.Type.DEFAULT.name()); + if (IndexStoreModule.Type.FS.match(storeType) || IndexStoreModule.Type.DEFAULT.match(storeType)) { + final FSDirectory open = FSDirectory.open(location, lockFactory); // use lucene defaults + if (open instanceof MMapDirectory && Constants.WINDOWS == false) { + return newDefaultDir(location, (MMapDirectory) open, lockFactory); + } + return open; + } else if (IndexStoreModule.Type.SIMPLEFS.match(storeType)) { + return new SimpleFSDirectory(location, lockFactory); + } else if (IndexStoreModule.Type.NIOFS.match(storeType)) { + return new NIOFSDirectory(location, lockFactory); + } else if (IndexStoreModule.Type.MMAPFS.match(storeType)) { + return new MMapDirectory(location, lockFactory); + } + throw new IllegalArgumentException("No directory found for type [" + storeType + "]"); + } + + private Directory newDefaultDir(Path location, final MMapDirectory mmapDir, LockFactory lockFactory) throws IOException { + return new FileSwitchDirectory(PRIMARY_EXTENSIONS, mmapDir, new NIOFSDirectory(location, lockFactory), true) { + @Override + public String[] listAll() throws IOException { + // Avoid doing listAll twice: + return mmapDir.listAll(); + } + }; + } } diff --git a/src/main/java/org/elasticsearch/index/store/IndexStore.java b/src/main/java/org/elasticsearch/index/store/IndexStore.java index 161b915e508..cfef4f594be 100644 --- a/src/main/java/org/elasticsearch/index/store/IndexStore.java +++ b/src/main/java/org/elasticsearch/index/store/IndexStore.java @@ -20,28 +20,101 @@ package org.elasticsearch.index.store; import org.apache.lucene.store.StoreRateLimiting; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.index.AbstractIndexComponent; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.settings.IndexSettingsService; +import org.elasticsearch.indices.store.IndicesStore; import java.io.Closeable; -import java.io.IOException; -import java.nio.file.Path; /** - * Index store is an index level information of the {@link Store} each shard will use. 
+ * */ -public interface IndexStore extends Closeable { +public class IndexStore extends AbstractIndexComponent implements Closeable { + + public static final String INDEX_STORE_THROTTLE_TYPE = "index.store.throttle.type"; + public static final String INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC = "index.store.throttle.max_bytes_per_sec"; + + private final IndexSettingsService settingsService; + + class ApplySettings implements IndexSettingsService.Listener { + @Override + public void onRefreshSettings(Settings settings) { + String rateLimitingType = settings.get(INDEX_STORE_THROTTLE_TYPE, IndexStore.this.rateLimitingType); + if (!rateLimitingType.equals(IndexStore.this.rateLimitingType)) { + logger.info("updating index.store.throttle.type from [{}] to [{}]", IndexStore.this.rateLimitingType, rateLimitingType); + if (rateLimitingType.equalsIgnoreCase("node")) { + IndexStore.this.rateLimitingType = rateLimitingType; + IndexStore.this.nodeRateLimiting = true; + } else { + StoreRateLimiting.Type.fromString(rateLimitingType); + IndexStore.this.rateLimitingType = rateLimitingType; + IndexStore.this.nodeRateLimiting = false; + IndexStore.this.rateLimiting.setType(rateLimitingType); + } + } + + ByteSizeValue rateLimitingThrottle = settings.getAsBytesSize(INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, IndexStore.this.rateLimitingThrottle); + if (!rateLimitingThrottle.equals(IndexStore.this.rateLimitingThrottle)) { + logger.info("updating index.store.throttle.max_bytes_per_sec from [{}] to [{}], note, type is [{}]", IndexStore.this.rateLimitingThrottle, rateLimitingThrottle, IndexStore.this.rateLimitingType); + IndexStore.this.rateLimitingThrottle = rateLimitingThrottle; + IndexStore.this.rateLimiting.setMaxRate(rateLimitingThrottle); + } + } + } + protected final IndicesStore indicesStore; + + private volatile String rateLimitingType; + private volatile ByteSizeValue rateLimitingThrottle; + private volatile boolean nodeRateLimiting; + + private final StoreRateLimiting rateLimiting = new StoreRateLimiting(); + + private final ApplySettings applySettings = new ApplySettings(); + + @Inject + public IndexStore(Index index, @IndexSettings Settings indexSettings, IndexSettingsService settingsService, IndicesStore indicesStore) { + super(index, indexSettings); + this.indicesStore = indicesStore; + + this.rateLimitingType = indexSettings.get(INDEX_STORE_THROTTLE_TYPE, "none"); + if (rateLimitingType.equalsIgnoreCase("node")) { + nodeRateLimiting = true; + } else { + nodeRateLimiting = false; + rateLimiting.setType(rateLimitingType); + } + this.rateLimitingThrottle = indexSettings.getAsBytesSize(INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, new ByteSizeValue(0)); + rateLimiting.setMaxRate(rateLimitingThrottle); + + logger.debug("using index.store.throttle.type [{}], with index.store.throttle.max_bytes_per_sec [{}]", rateLimitingType, rateLimitingThrottle); + this.settingsService = settingsService; + this.settingsService.addListener(applySettings); + } + + @Override + public void close() { + settingsService.removeListener(applySettings); + } /** * Returns the rate limiting, either of the index is explicitly configured, or * the node level one (defaults to the node level one). */ - StoreRateLimiting rateLimiting(); + public StoreRateLimiting rateLimiting() { + return nodeRateLimiting ? indicesStore.rateLimiting() : this.rateLimiting; + } /** * The shard store class that should be used for each shard. 
*/ - Class<? extends DirectoryService> shardDirectory(); - + public Class<? extends DirectoryService> shardDirectory() { + return FsDirectoryService.class; + } } diff --git a/src/main/java/org/elasticsearch/index/store/IndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/IndexStoreModule.java index a4aa4d694a2..c40301d560b 100644 --- a/src/main/java/org/elasticsearch/index/store/IndexStoreModule.java +++ b/src/main/java/org/elasticsearch/index/store/IndexStoreModule.java @@ -20,23 +20,16 @@ package org.elasticsearch.index.store; import com.google.common.collect.ImmutableList; -import org.apache.lucene.store.MMapDirectory; -import org.apache.lucene.util.Constants; -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.inject.Module; -import org.elasticsearch.common.inject.Modules; -import org.elasticsearch.common.inject.SpawnModules; +import org.elasticsearch.common.inject.*; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.store.fs.DefaultFsIndexStoreModule; -import org.elasticsearch.index.store.fs.MmapFsIndexStoreModule; -import org.elasticsearch.index.store.fs.NioFsIndexStoreModule; -import org.elasticsearch.index.store.fs.SimpleFsIndexStoreModule; /** * */ public class IndexStoreModule extends AbstractModule implements SpawnModules { + public static final String STORE_TYPE = "index.store.type"; + private final Settings settings; public static enum Type { @@ -75,36 +68,23 @@ public class IndexStoreModule extends AbstractModule implements SpawnModules { @Override public Iterable<? extends Module> spawnModules() { - Class<? extends Module> indexStoreModule = NioFsIndexStoreModule.class; - if ((Constants.WINDOWS || Constants.SUN_OS || Constants.LINUX) - && Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) { - if (Constants.WINDOWS) { - indexStoreModule = MmapFsIndexStoreModule.class; - } else { - // on linux and friends we only mmap dedicated files - indexStoreModule = DefaultFsIndexStoreModule.class; + final String storeType = settings.get(STORE_TYPE, Type.DEFAULT.name()); + for (Type type : Type.values()) { + if (type.match(storeType)) { + return ImmutableList.of(new DefaultStoreModule()); } - } else if (Constants.WINDOWS) { - indexStoreModule = SimpleFsIndexStoreModule.class; - } - String storeType = settings.get("index.store.type"); - if (Type.FS.match(storeType)) { - // nothing to set here ...
(we default to fs) - } else if (Type.SIMPLEFS.match(storeType)) { - indexStoreModule = SimpleFsIndexStoreModule.class; - } else if (Type.NIOFS.match(storeType)) { - indexStoreModule = NioFsIndexStoreModule.class; - } else if (Type.MMAPFS.match(storeType)) { - indexStoreModule = MmapFsIndexStoreModule.class; - } else if (Type.DEFAULT.match(storeType)) { - indexStoreModule = DefaultFsIndexStoreModule.class; - } else if (storeType != null) { - indexStoreModule = settings.getAsClass("index.store.type", indexStoreModule, "org.elasticsearch.index.store.", "IndexStoreModule"); } + final Class<? extends Module> indexStoreModule = settings.getAsClass(STORE_TYPE, null, "org.elasticsearch.index.store.", "IndexStoreModule"); return ImmutableList.of(Modules.createModule(indexStoreModule, settings)); } @Override - protected void configure() { + protected void configure() {} + + private static class DefaultStoreModule extends AbstractModule { + @Override + protected void configure() { + bind(IndexStore.class).asEagerSingleton(); + } } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/store/Store.java b/src/main/java/org/elasticsearch/index/store/Store.java index 117547c98df..71dd77c690e 100644 --- a/src/main/java/org/elasticsearch/index/store/Store.java +++ b/src/main/java/org/elasticsearch/index/store/Store.java @@ -27,7 +27,6 @@ import org.apache.lucene.index.*; import org.apache.lucene.store.*; import org.apache.lucene.util.*; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; @@ -532,7 +531,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref * @param reason the reason for this cleanup operation logged for each deleted file * @param sourceMetaData the metadata used for cleanup. all files in this metadata should be kept around. * @throws IOException if an IOException occurs - * @throws ElasticsearchIllegalStateException if the latest snapshot in this store differs from the given one after the cleanup. + * @throws IllegalStateException if the latest snapshot in this store differs from the given one after the cleanup. */ public void cleanupAndVerify(String reason, MetadataSnapshot sourceMetaData) throws IOException { metadataLock.writeLock().lock(); @@ -553,7 +552,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref || existingFile.equals(IndexFileNames.OLD_SEGMENTS_GEN)) { // TODO do we need to also fail this if we can't delete the pending commit file? // if one of those files can't be deleted we better fail the cleanup otherwise we might leave an old commit point around?
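Note: here and throughout the rest of the change, Elasticsearch-specific runtime exceptions (ElasticsearchIllegalStateException, ElasticsearchIllegalArgumentException) give way to their JDK equivalents, and the @throws javadoc above is updated to match. For callers the only practical difference is the catch type; a hedged sketch (the failure handler shown is illustrative, not taken from this diff):

    try {
        store.cleanupAndVerify("recovery finalization", sourceMetaData);
    } catch (IllegalStateException e) {
        // surviving files no longer match the snapshot we recovered from
        indexShard.failShard("post-recovery cleanup failed", e); // illustrative handler
    }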
- throw new ElasticsearchIllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex); + throw new IllegalStateException("Can't delete " + existingFile + " - cleanup failed", ex); } logger.debug("failed to delete file [{}]", ex, existingFile); // ignore, we don't really care, will get deleted later on @@ -592,12 +591,12 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref final boolean consistent = hashAndLengthEqual || same; if (consistent == false) { logger.debug("Files are different on the recovery target: {} ", recoveryDiff); - throw new ElasticsearchIllegalStateException("local version: " + local + " is different from remote version after recovery: " + remote, null); + throw new IllegalStateException("local version: " + local + " is different from remote version after recovery: " + remote, null); } } } else { logger.debug("Files are missing on the recovery target: {} ", recoveryDiff); - throw new ElasticsearchIllegalStateException("Files are missing on the recovery target: [different=" + throw new IllegalStateException("Files are missing on the recovery target: [different=" + recoveryDiff.different + ", missing=" + recoveryDiff.missing + ']', null); } } diff --git a/src/main/java/org/elasticsearch/index/store/StoreModule.java b/src/main/java/org/elasticsearch/index/store/StoreModule.java index fd6fe6e11bc..273455bf214 100644 --- a/src/main/java/org/elasticsearch/index/store/StoreModule.java +++ b/src/main/java/org/elasticsearch/index/store/StoreModule.java @@ -29,11 +29,6 @@ import org.elasticsearch.index.shard.ShardPath; */ public class StoreModule extends AbstractModule { - public static final String DISTIBUTOR_KEY = "index.store.distributor"; - public static final String LEAST_USED_DISTRIBUTOR = "least_used"; - public static final String RANDOM_WEIGHT_DISTRIBUTOR = "random"; - - private final Settings settings; private final ShardLock lock; private final Store.OnClose closeCallback; @@ -41,9 +36,8 @@ public class StoreModule extends AbstractModule { private final Class<? extends DirectoryService> shardDirectory; - public StoreModule(Settings settings, Class<? extends DirectoryService> shardDirectory, ShardLock lock, Store.OnClose closeCallback, ShardPath path) { + public StoreModule(Class<? extends DirectoryService> shardDirectory, ShardLock lock, Store.OnClose closeCallback, ShardPath path) { this.shardDirectory = shardDirectory; - this.settings = settings; this.lock = lock; this.closeCallback = closeCallback; this.path = path; diff --git a/src/main/java/org/elasticsearch/index/store/fs/DefaultFsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/fs/DefaultFsDirectoryService.java deleted file mode 100644 index ee1ed85f1e7..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/DefaultFsDirectoryService.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied.
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import com.google.common.collect.Sets; -import org.apache.lucene.store.*; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.IndexStore; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; -import java.util.Collections; -import java.util.Set; - -/** - */ -public class DefaultFsDirectoryService extends FsDirectoryService { - /* - * We are mmapping docvalues as well as term dictionaries, all other files are served through NIOFS - * this provides good random access performance while not creating unnecessary mmaps for files like stored - * fields etc. - */ - private static final Set<String> PRIMARY_EXTENSIONS = Collections.unmodifiableSet(Sets.newHashSet("dvd", "tim")); - - @Inject - public DefaultFsDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore, ShardPath shardPath) { - super(shardId, indexSettings, indexStore, shardPath); - } - - @Override - protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - final MMapDirectory mmapDir = new MMapDirectory(location, lockFactory); - return new FileSwitchDirectory(PRIMARY_EXTENSIONS, mmapDir, new NIOFSDirectory(location, lockFactory), true) { - @Override - public String[] listAll() throws IOException { - // Avoid doing listAll twice: - return mmapDir.listAll(); - } - }; - } -} diff --git a/src/main/java/org/elasticsearch/index/store/fs/DefaultFsIndexStore.java b/src/main/java/org/elasticsearch/index/store/fs/DefaultFsIndexStore.java deleted file mode 100644 index ccd8369a58e..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/DefaultFsIndexStore.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
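Note: DefaultFsDirectoryService, deleted above, survives as the newDefaultDir branch of FsDirectoryService in the first hunk of this change. Routing between the two wrapped directories is by file extension, per FileSwitchDirectory's Lucene contract, and the listAll() override matters because both directories point at the same path: the default merged listing would read the directory twice and report duplicates. A quick sketch of the routing decision, reusing PRIMARY_EXTENSIONS from above:

    String ext = FileSwitchDirectory.getExtension("_0_Lucene50_0.dvd"); // -> "dvd"
    boolean viaMmap = PRIMARY_EXTENSIONS.contains(ext);                 // true: doc values are mmapped
    // "fdt", "cfs", ... fall through to the NIOFSDirectory instead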
- */ - -package org.elasticsearch.index.store.fs; - -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.support.AbstractIndexStore; -import org.elasticsearch.indices.store.IndicesStore; - -/** - * - */ -public final class DefaultFsIndexStore extends AbstractIndexStore { - - @Inject - public DefaultFsIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) { - super(index, indexSettings, indexService, indicesStore, nodeEnv); - } - - @Override - public Class shardDirectory() { - return DefaultFsDirectoryService.class; - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/store/fs/MmapFsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/fs/MmapFsDirectoryService.java deleted file mode 100644 index 9f9102eba4d..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/MmapFsDirectoryService.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.LockFactory; -import org.apache.lucene.store.MMapDirectory; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.IndexStore; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; - -/** - */ -public class MmapFsDirectoryService extends FsDirectoryService { - - @Inject - public MmapFsDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore, ShardPath shardPath) { - super(shardId, indexSettings, indexStore, shardPath); - } - - @Override - protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - return new MMapDirectory(location, buildLockFactory()); - } -} diff --git a/src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStore.java b/src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStore.java deleted file mode 100644 index 444cd67b713..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStore.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.support.AbstractIndexStore; -import org.elasticsearch.indices.store.IndicesStore; - -/** - * - */ -public final class MmapFsIndexStore extends AbstractIndexStore { - - @Inject - public MmapFsIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) { - super(index, indexSettings, indexService, indicesStore, nodeEnv); - } - - @Override - public Class shardDirectory() { - return MmapFsDirectoryService.class; - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/store/fs/NioFsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/fs/NioFsDirectoryService.java deleted file mode 100644 index b2d99fa3b82..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/NioFsDirectoryService.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
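Note: the Mmap*/Nio*/SimpleFs* IndexStore classes deleted in this stretch were pure boilerplate; each existed only to name a DirectoryService implementation. Store selection is now data-driven through the index.store.type setting, for example at index-creation time. A sketch (the exact settings-builder entry point varies between 1.x and 2.x client APIs, so treat the builder call as an assumption):

    client.admin().indices().prepareCreate("logs-2015")      // index name is made up
            .setSettings(Settings.settingsBuilder()          // 2.x-era builder name, assumption
                    .put("index.store.type", "mmapfs"))      // or niofs / simplefs / default
            .get();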
- */ - -package org.elasticsearch.index.store.fs; - -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.LockFactory; -import org.apache.lucene.store.NIOFSDirectory; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.IndexStore; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; - -/** - */ -public class NioFsDirectoryService extends FsDirectoryService { - - @Inject - public NioFsDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore, ShardPath shardPath) { - super(shardId, indexSettings, indexStore, shardPath); - } - - @Override - protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - return new NIOFSDirectory(location, lockFactory); - } -} diff --git a/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStore.java b/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStore.java deleted file mode 100644 index 7f5cece99b6..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStore.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.support.AbstractIndexStore; -import org.elasticsearch.indices.store.IndicesStore; - -/** - * - */ -public final class NioFsIndexStore extends AbstractIndexStore { - - @Inject - public NioFsIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) { - super(index, indexSettings, indexService, indicesStore, nodeEnv); - } - - @Override - public Class shardDirectory() { - return NioFsDirectoryService.class; - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStoreModule.java deleted file mode 100644 index 9db1cbdec6b..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/NioFsIndexStoreModule.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.index.store.IndexStore; - -/** - * - */ -public class NioFsIndexStoreModule extends AbstractModule { - - @Override - protected void configure() { - bind(IndexStore.class).to(NioFsIndexStore.class).asEagerSingleton(); - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/store/fs/SimpleFsDirectoryService.java b/src/main/java/org/elasticsearch/index/store/fs/SimpleFsDirectoryService.java deleted file mode 100644 index 051b278cd11..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/SimpleFsDirectoryService.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
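Note: with these built-in per-type Guice modules gone, IndexStoreModule.spawnModules (earlier in this change) answers every recognized index.store.type value with the single DefaultStoreModule and only falls back to Settings.getAsClass for third-party values. A custom store can therefore still plug in by shipping a module and naming it in the setting; a sketch where every My* name is hypothetical:

    public class MyIndexStoreModule extends AbstractModule {
        @Override
        protected void configure() {
            // MyIndexStore extends IndexStore and points at a custom DirectoryService
            bind(IndexStore.class).to(MyIndexStore.class).asEagerSingleton();
        }
    }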
- */ - -package org.elasticsearch.index.store.fs; - -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.LockFactory; -import org.apache.lucene.store.SimpleFSDirectory; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.IndexStore; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Path; - -/** - */ -public class SimpleFsDirectoryService extends FsDirectoryService { - - @Inject - public SimpleFsDirectoryService(ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore, ShardPath shardPath) { - super(shardId, indexSettings, indexStore, shardPath); - } - - @Override - protected Directory newFSDirectory(Path location, LockFactory lockFactory) throws IOException { - return new SimpleFSDirectory(location, lockFactory); - } -} diff --git a/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStore.java b/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStore.java deleted file mode 100644 index 1a9f40b9779..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStore.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.support.AbstractIndexStore; -import org.elasticsearch.indices.store.IndicesStore; - -/** - * - */ -public final class SimpleFsIndexStore extends AbstractIndexStore { - - @Inject - public SimpleFsIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) { - super(index, indexSettings, indexService, indicesStore, nodeEnv); - } - - @Override - public Class shardDirectory() { - return SimpleFsDirectoryService.class; - } -} diff --git a/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStoreModule.java b/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStoreModule.java deleted file mode 100644 index c35997aaf0d..00000000000 --- a/src/main/java/org/elasticsearch/index/store/fs/SimpleFsIndexStoreModule.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store.fs; - -import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.index.store.IndexStore; - -/** - * - */ -public class SimpleFsIndexStoreModule extends AbstractModule { - - @Override - protected void configure() { - bind(IndexStore.class).to(SimpleFsIndexStore.class).asEagerSingleton(); - } -} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/index/store/support/AbstractIndexStore.java b/src/main/java/org/elasticsearch/index/store/support/AbstractIndexStore.java deleted file mode 100644 index f4e63c55ebf..00000000000 --- a/src/main/java/org/elasticsearch/index/store/support/AbstractIndexStore.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
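Note: AbstractIndexStore, deleted below, is where the throttle-settings listener used to live; the new IndexStore keeps the same logic but registers and unregisters through an injected IndexSettingsService rather than reaching through IndexService. The pattern reduced to its shape (field names here are mine, not the diff's):

    class ThrottleSettingsListener implements IndexSettingsService.Listener {
        private final StoreRateLimiting rateLimiting = new StoreRateLimiting();
        private volatile ByteSizeValue maxRate = new ByteSizeValue(0);

        @Override
        public void onRefreshSettings(Settings settings) {
            // re-read only the key we own, defaulting to the current value
            ByteSizeValue updated = settings.getAsBytesSize("index.store.throttle.max_bytes_per_sec", maxRate);
            if (updated.equals(maxRate) == false) {
                maxRate = updated;
                rateLimiting.setMaxRate(updated);
            }
        }
    }
    // settingsService.addListener(listener) in the constructor and
    // settingsService.removeListener(listener) in close(), or the listener leaks.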
- */ - -package org.elasticsearch.index.store.support; - -import org.apache.lucene.store.StoreRateLimiting; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; -import org.elasticsearch.common.io.FileSystemUtils; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.env.NodeEnvironment; -import org.elasticsearch.index.AbstractIndexComponent; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.settings.IndexSettings; -import org.elasticsearch.index.settings.IndexSettingsService; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.store.IndexStore; -import org.elasticsearch.indices.store.IndicesStore; - -import java.io.IOException; -import java.nio.file.Path; - -/** - * - */ -public abstract class AbstractIndexStore extends AbstractIndexComponent implements IndexStore { - - public static final String INDEX_STORE_THROTTLE_TYPE = "index.store.throttle.type"; - public static final String INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC = "index.store.throttle.max_bytes_per_sec"; - - class ApplySettings implements IndexSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - String rateLimitingType = settings.get(INDEX_STORE_THROTTLE_TYPE, AbstractIndexStore.this.rateLimitingType); - if (!rateLimitingType.equals(AbstractIndexStore.this.rateLimitingType)) { - logger.info("updating index.store.throttle.type from [{}] to [{}]", AbstractIndexStore.this.rateLimitingType, rateLimitingType); - if (rateLimitingType.equalsIgnoreCase("node")) { - AbstractIndexStore.this.rateLimitingType = rateLimitingType; - AbstractIndexStore.this.nodeRateLimiting = true; - } else { - StoreRateLimiting.Type.fromString(rateLimitingType); - AbstractIndexStore.this.rateLimitingType = rateLimitingType; - AbstractIndexStore.this.nodeRateLimiting = false; - AbstractIndexStore.this.rateLimiting.setType(rateLimitingType); - } - } - - ByteSizeValue rateLimitingThrottle = settings.getAsBytesSize(INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, AbstractIndexStore.this.rateLimitingThrottle); - if (!rateLimitingThrottle.equals(AbstractIndexStore.this.rateLimitingThrottle)) { - logger.info("updating index.store.throttle.max_bytes_per_sec from [{}] to [{}], note, type is [{}]", AbstractIndexStore.this.rateLimitingThrottle, rateLimitingThrottle, AbstractIndexStore.this.rateLimitingType); - AbstractIndexStore.this.rateLimitingThrottle = rateLimitingThrottle; - AbstractIndexStore.this.rateLimiting.setMaxRate(rateLimitingThrottle); - } - } - } - private final NodeEnvironment nodeEnv; - - private final Path[] locations; - - protected final IndexService indexService; - - protected final IndicesStore indicesStore; - - private volatile String rateLimitingType; - private volatile ByteSizeValue rateLimitingThrottle; - private volatile boolean nodeRateLimiting; - - private final StoreRateLimiting rateLimiting = new StoreRateLimiting(); - - private final ApplySettings applySettings = new ApplySettings(); - - protected AbstractIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, IndicesStore indicesStore, NodeEnvironment nodeEnv) { - super(index, indexSettings); - this.indexService = indexService; - this.indicesStore = indicesStore; - - this.rateLimitingType = indexSettings.get(INDEX_STORE_THROTTLE_TYPE, "none"); - if (rateLimitingType.equalsIgnoreCase("node")) { - 
nodeRateLimiting = true; - } else { - nodeRateLimiting = false; - rateLimiting.setType(rateLimitingType); - } - this.rateLimitingThrottle = indexSettings.getAsBytesSize(INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, new ByteSizeValue(0)); - rateLimiting.setMaxRate(rateLimitingThrottle); - - logger.debug("using index.store.throttle.type [{}], with index.store.throttle.max_bytes_per_sec [{}]", rateLimitingType, rateLimitingThrottle); - - indexService.settingsService().addListener(applySettings); - this.nodeEnv = nodeEnv; - if (nodeEnv.hasNodeFile()) { - this.locations = nodeEnv.indexPaths(index); - } else { - this.locations = null; - } - - } - - @Override - public void close() throws ElasticsearchException { - indexService.settingsService().removeListener(applySettings); - } - - @Override - public StoreRateLimiting rateLimiting() { - return nodeRateLimiting ? indicesStore.rateLimiting() : this.rateLimiting; - } -} diff --git a/src/main/java/org/elasticsearch/index/termvectors/ShardTermVectorsService.java b/src/main/java/org/elasticsearch/index/termvectors/ShardTermVectorsService.java index 6d60d21b1fe..9ca66a65ec7 100644 --- a/src/main/java/org/elasticsearch/index/termvectors/ShardTermVectorsService.java +++ b/src/main/java/org/elasticsearch/index/termvectors/ShardTermVectorsService.java @@ -20,7 +20,12 @@ package org.elasticsearch.index.termvectors; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.index.*; +import org.apache.lucene.index.Fields; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexableField; +import org.apache.lucene.index.MultiFields; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.Terms; import org.apache.lucene.index.memory.MemoryIndex; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.termvectors.TermVectorsFilter; @@ -40,18 +45,30 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.get.GetField; import org.elasticsearch.index.get.GetResult; -import org.elasticsearch.index.mapper.*; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.Mapping; +import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.mapper.core.StringFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.shard.AbstractIndexShardComponent; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.dfs.AggregatedDfs; import java.io.IOException; -import java.util.*; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; import static org.elasticsearch.index.mapper.SourceToParse.source; @@ -285,7 +302,6 @@ public class ShardTermVectorsService extends AbstractIndexShardComponent { private ParsedDocument parseDocument(String index, String type, BytesReference doc) throws Throwable { MapperService mapperService = indexShard.mapperService(); - 
IndexService indexService = indexShard.indexService(); // TODO: make parsing not dynamically create fields not in the original mapping Tuple<DocumentMapper, Mapping> docMapper = mapperService.documentMapperWithAutoCreate(type); @@ -294,7 +310,7 @@ public class ShardTermVectorsService extends AbstractIndexShardComponent { parsedDocument.addDynamicMappingsUpdate(docMapper.v2()); } if (parsedDocument.dynamicMappingsUpdate() != null) { - mappingUpdatedAction.updateMappingOnMasterSynchronously(index, indexService.indexUUID(), type, parsedDocument.dynamicMappingsUpdate()); + mappingUpdatedAction.updateMappingOnMasterSynchronously(index, type, parsedDocument.dynamicMappingsUpdate()); } return parsedDocument; } diff --git a/src/main/java/org/elasticsearch/index/translog/Translog.java b/src/main/java/org/elasticsearch/index/translog/Translog.java index 5e132304779..63970ab3aaf 100644 --- a/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -23,8 +23,6 @@ import org.apache.lucene.index.Term; import org.apache.lucene.util.Accountable; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -269,7 +267,7 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { case 4: return DELETE_BY_QUERY; default: - throw new ElasticsearchIllegalArgumentException("No type mapped for [" + id + "]"); + throw new IllegalArgumentException("No type mapped for [" + id + "]"); } } } @@ -635,7 +633,7 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { @Override public Source getSource(){ - throw new ElasticsearchIllegalStateException("trying to read doc source from delete operation"); + throw new IllegalStateException("trying to read doc source from delete operation"); } @Override @@ -662,6 +660,8 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { } } + /** @deprecated Delete-by-query is removed in 2.0, but we keep this so translog can replay on upgrade.
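Note: the deprecation above is deliberate. 2.0 drops the delete-by-query API, but translog files written by older nodes can still carry its opcode, so the constant must stay decodable or an upgrade would fail mid-replay. The mapping it protects looks roughly like this (ids 1-3 per my reading of Operation.Type in this era of the codebase; only case 4 appears in the hunk above):

    static Type fromId(byte id) {
        switch (id) {
            case 1: return CREATE;
            case 2: return SAVE;
            case 3: return DELETE;
            case 4: return DELETE_BY_QUERY; // dead API, alive on disk until old logs are gone
            default: throw new IllegalArgumentException("No type mapped for [" + id + "]");
        }
    }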
*/ + @Deprecated static class DeleteByQuery implements Operation { public static final int SERIALIZATION_FORMAT = 2; @@ -707,7 +707,7 @@ public interface Translog extends IndexShardComponent, Closeable, Accountable { @Override public Source getSource() { - throw new ElasticsearchIllegalStateException("trying to read doc source from delete_by_query operation"); + throw new IllegalStateException("trying to read doc source from delete_by_query operation"); } @Override diff --git a/src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java b/src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java index 6fb3988b829..ebd5e125353 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/BufferingFsTranslogFile.java @@ -239,6 +239,11 @@ public class BufferingFsTranslogFile implements FsTranslogFile { return channelReference.file(); } + @Override + public boolean closed() { + return this.closed.get(); + } + class WrapperOutputStream extends OutputStream { @Override diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsChannelSnapshot.java b/src/main/java/org/elasticsearch/index/translog/fs/FsChannelSnapshot.java index be2bd5de4b0..bff79bb5dbf 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsChannelSnapshot.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/FsChannelSnapshot.java @@ -145,7 +145,7 @@ public class FsChannelSnapshot implements Translog.Snapshot { } @Override - public void close() throws ElasticsearchException { + public void close() { if (closed.compareAndSet(false, true)) { channelReference.decRef(); } diff --git a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java index 2479c1a0de2..15b76333b7a 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslog.java @@ -50,6 +50,7 @@ import java.nio.file.*; import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.regex.Matcher; @@ -93,8 +94,6 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog private final ApplySettings applySettings = new ApplySettings(); - - @Inject public FsTranslog(ShardId shardId, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, BigArrays bigArrays, ShardPath shardPath) throws IOException { @@ -355,10 +354,15 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog @Override public FsChannelSnapshot snapshot() throws TranslogException { while (true) { + FsTranslogFile current = this.current; FsChannelSnapshot snapshot = current.snapshot(); if (snapshot != null) { return snapshot; } + if (current.closed() && this.current == current) { + // check if we are closed and if we are still current - then this translog is closed and we can exit + throw new TranslogException(shardId, "current translog is already closed"); + } Thread.yield(); } } @@ -532,7 +536,7 @@ public class FsTranslog extends AbstractIndexShardComponent implements Translog } @Override - public void close() throws ElasticsearchException { + public void close() { try { input.close(); } catch (IOException ex) { diff --git 
a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java index 7cfe8744660..751bfc3ec5b 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/FsTranslogFile.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.translog.fs; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.translog.Translog; @@ -49,13 +48,13 @@ public interface FsTranslogFile extends Closeable { public abstract FsTranslogFile create(ShardId shardId, long id, ChannelReference raf, int bufferSize) throws IOException; - public static Type fromString(String type) throws ElasticsearchIllegalArgumentException { + public static Type fromString(String type) { if (SIMPLE.name().equalsIgnoreCase(type)) { return SIMPLE; } else if (BUFFERED.name().equalsIgnoreCase(type)) { return BUFFERED; } - throw new ElasticsearchIllegalArgumentException("No translog fs type [" + type + "]"); + throw new IllegalArgumentException("No translog fs type [" + type + "]"); } } @@ -82,4 +81,6 @@ public interface FsTranslogFile extends Closeable { TranslogStream getStream(); public Path getPath(); + + public boolean closed(); } diff --git a/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java b/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java index d4d508b83e2..199847d0779 100644 --- a/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java +++ b/src/main/java/org/elasticsearch/index/translog/fs/SimpleFsTranslogFile.java @@ -182,4 +182,10 @@ public class SimpleFsTranslogFile implements FsTranslogFile { public void updateBufferSize(int bufferSize) throws TranslogException { // nothing to do here... 
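Note: the closed() accessor added to FsTranslogFile (implemented by BufferingFsTranslogFile above and SimpleFsTranslogFile just below) exists for the snapshot() retry loop in FsTranslog: snapshot() legitimately returns null while the translog rolls to a new generation, so the loop spins, and without a closed-and-still-current check it would spin forever on a closed shard. The loop, annotated (a restatement of the diff, not new behavior):

    while (true) {
        FsTranslogFile current = this.current;           // capture one generation
        FsChannelSnapshot snapshot = current.snapshot(); // null when racing a roll or close
        if (snapshot != null) {
            return snapshot;
        }
        if (current.closed() && this.current == current) {
            // closed and nobody swapped in a new generation: really closed
            throw new TranslogException(shardId, "current translog is already closed");
        }
        Thread.yield(); // a roll is in flight; retry against the new current
    }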
} + + @Override + public boolean closed() { + return this.closed.get(); + } + } diff --git a/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java b/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java index aa7b559bb30..95d1c0f4d6d 100644 --- a/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java +++ b/src/main/java/org/elasticsearch/indices/AliasFilterParsingException.java @@ -27,10 +27,6 @@ import org.elasticsearch.index.IndexException; */ public class AliasFilterParsingException extends IndexException { - public AliasFilterParsingException(Index index, String name, String desc) { - super(index, "[" + name + "], " + desc); - } - public AliasFilterParsingException(Index index, String name, String desc, Throwable ex) { super(index, "[" + name + "], " + desc, ex); } diff --git a/src/main/java/org/elasticsearch/indices/IndexMissingException.java b/src/main/java/org/elasticsearch/indices/IndexMissingException.java index dbadef5a076..1bec1585a7c 100644 --- a/src/main/java/org/elasticsearch/indices/IndexMissingException.java +++ b/src/main/java/org/elasticsearch/indices/IndexMissingException.java @@ -29,7 +29,7 @@ import org.elasticsearch.rest.RestStatus; public class IndexMissingException extends IndexException { public IndexMissingException(Index index) { - super(index, "missing"); + super(index, "no such index"); } @Override diff --git a/src/main/java/org/elasticsearch/indices/IndicesService.java b/src/main/java/org/elasticsearch/indices/IndicesService.java index a78a0b4fd79..663fe402729 100644 --- a/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -25,8 +25,6 @@ import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; @@ -124,11 +122,11 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { ImmutableSet<String> indices = ImmutableSet.copyOf(this.indices.keySet()); final CountDownLatch latch = new CountDownLatch(indices.size()); @@ -160,7 +158,7 @@ } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { IOUtils.closeWhileHandlingException(injector.getInstance(RecoverySettings.class), indicesAnalysisService); } @@ -280,9 +278,9 @@ return indexService; } - public synchronized IndexService createIndex(String sIndexName, @IndexSettings Settings settings, String localNodeId) throws ElasticsearchException { + public synchronized IndexService createIndex(String sIndexName, @IndexSettings Settings settings, String localNodeId) { if (!lifecycle.started()) { - throw new ElasticsearchIllegalStateException("Can't create an index [" + sIndexName + "], node is closed"); + throw new IllegalStateException("Can't create an index [" +
sIndexName + "], node is closed"); } Index index = new Index(sIndexName); if (indices.containsKey(index.name())) { @@ -342,11 +340,11 @@ public class IndicesService extends AbstractLifecycleComponent i * @param index the index to remove * @param reason the high level reason causing this removal */ - public void removeIndex(String index, String reason) throws ElasticsearchException { + public void removeIndex(String index, String reason) { removeIndex(index, reason, false); } - private void removeIndex(String index, String reason, boolean delete) throws ElasticsearchException { + private void removeIndex(String index, String reason, boolean delete) { try { final IndexService indexService; final Injector indexInjector; @@ -449,7 +447,7 @@ public class IndicesService extends AbstractLifecycleComponent i try { if (clusterState.metaData().hasIndex(indexName)) { final IndexMetaData index = clusterState.metaData().index(indexName); - throw new ElasticsearchIllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getUUID() + "] [" + metaData.getUUID() + "]"); + throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getUUID() + "] [" + metaData.getUUID() + "]"); } deleteIndexStore(reason, metaData, clusterState); } catch (IOException e) { @@ -468,13 +466,13 @@ public class IndicesService extends AbstractLifecycleComponent i String indexName = metaData.index(); if (indices.containsKey(indexName)) { String localUUid = indices.get(indexName).v1().indexUUID(); - throw new ElasticsearchIllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUid+ "] [" + metaData.getUUID() + "]"); + throw new IllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUid+ "] [" + metaData.getUUID() + "]"); } if (clusterState.metaData().hasIndex(indexName) && (clusterState.nodes().localNode().masterNode() == true)) { // we do not delete the store if it is a master eligible node and the index is still in the cluster state // because we want to keep the meta data for indices around even if no shards are left here final IndexMetaData index = clusterState.metaData().index(indexName); - throw new ElasticsearchIllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getUUID() + "] [" + metaData.getUUID() + "]"); + throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getUUID() + "] [" + metaData.getUUID() + "]"); } } Index index = new Index(metaData.index()); @@ -533,7 +531,7 @@ public class IndicesService extends AbstractLifecycleComponent i public void deleteShardStore(String reason, ShardId shardId, IndexMetaData metaData) throws IOException { final Settings indexSettings = buildIndexSettings(metaData); if (canDeleteShardContent(shardId, indexSettings) == false) { - throw new ElasticsearchIllegalStateException("Can't delete shard " + shardId); + throw new IllegalStateException("Can't delete shard " + shardId); } nodeEnv.deleteShardDirectorySafe(shardId, indexSettings); logger.trace("{} deleting shard reason [{}]", shardId, reason); @@ -614,10 +612,10 @@ public class IndicesService extends AbstractLifecycleComponent i */ public void addPendingDelete(ShardId shardId, @IndexSettings 
Settings settings) { if (shardId == null) { - throw new ElasticsearchIllegalArgumentException("shardId must not be null"); + throw new IllegalArgumentException("shardId must not be null"); } if (settings == null) { - throw new ElasticsearchIllegalArgumentException("settings must not be null"); + throw new IllegalArgumentException("settings must not be null"); } PendingDelete pendingDelete = new PendingDelete(shardId, settings, false); addPendingDelete(shardId.index(), pendingDelete); @@ -751,4 +749,4 @@ public class IndicesService extends AbstractLifecycleComponent i return deleteList.size(); } } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java b/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java index ee0ff0c81c6..ddc04403e7f 100644 --- a/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java +++ b/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices; import com.google.common.collect.Lists; import com.google.common.collect.Maps; + import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.IndexShardStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; diff --git a/src/main/java/org/elasticsearch/indices/SyncedFlushService.java b/src/main/java/org/elasticsearch/indices/SyncedFlushService.java index a0648c31b9a..4ec05620863 100644 --- a/src/main/java/org/elasticsearch/indices/SyncedFlushService.java +++ b/src/main/java/org/elasticsearch/indices/SyncedFlushService.java @@ -19,6 +19,7 @@ package org.elasticsearch.indices; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.cluster.node.liveness.LivenessRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; @@ -77,9 +78,9 @@ public class SyncedFlushService extends AbstractComponent { this.clusterService = clusterService; this.transportService = transportService; - transportService.registerHandler(PRE_SYNCED_FLUSH_ACTION_NAME, new PreSyncedFlushTransportHandler()); - transportService.registerHandler(SYNCED_FLUSH_ACTION_NAME, new SyncedFlushTransportHandler()); - transportService.registerHandler(IN_FLIGHT_OPS_ACTION_NAME, new InFlightOpCountTransportHandler()); + transportService.registerRequestHandler(PRE_SYNCED_FLUSH_ACTION_NAME, PreSyncedFlushRequest.class, ThreadPool.Names.FLUSH, new PreSyncedFlushTransportHandler()); + transportService.registerRequestHandler(SYNCED_FLUSH_ACTION_NAME, SyncedFlushRequest.class, ThreadPool.Names.FLUSH, new SyncedFlushTransportHandler()); + transportService.registerRequestHandler(IN_FLIGHT_OPS_ACTION_NAME, InFlightOpsRequest.class, ThreadPool.Names.SAME, new InFlightOpCountTransportHandler()); preSyncTimeout = settings.getAsTime(SETTING_PRE_SYNC_TIMEOUT, TimeValue.timeValueMinutes(5)); syncTimeout = settings.getAsTime(SETTING_SYNC_TIMEOUT, TimeValue.timeValueMinutes(5)); inflightOpsTimeout = settings.getAsTime(SETTING_IN_FLIGHT_OPS_TIMEOUT, TimeValue.timeValueMinutes(5)); @@ -113,7 +114,9 @@ public class SyncedFlushService extends AbstractComponent { return new SyncedFlushResult(syncId, results); } - /** returns the number of inflight operations on primary. -1 upon error. */ + /** + * returns the number of inflight operations on primary. -1 upon error. 
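Note: the constructor change above is the visible half of a transport-layer cleanup. The request class and executor thread pool now accompany registerRequestHandler, so the handlers (rewritten near the end of this file's diff) shrink to a single messageReceived method; newInstance() and executor() disappear. Registration shape, with the action-name constant's value left out because it does not appear in this diff:

    transportService.registerRequestHandler(
            PRE_SYNCED_FLUSH_ACTION_NAME,          // action name constant; value not shown here
            PreSyncedFlushRequest.class,           // the transport layer instantiates requests
            ThreadPool.Names.FLUSH,                // executor chosen at registration time
            new PreSyncedFlushTransportHandler()); // handler now only handles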
+ */ protected int getInflightOpsCount(final ShardId shardId, ClusterState state, IndexShardRoutingTable shardRoutingTable) { final ShardRouting primaryShard = shardRoutingTable.primaryShard(); final DiscoveryNode primaryNode = state.nodes().get(primaryShard.currentNodeId()); @@ -220,7 +223,9 @@ return results; } - /** send presync requests to all started copies of the given shard */ + /** + * send presync requests to all started copies of the given shard + */ Map<String, Engine.CommitId> sendPreSyncRequests(final List<ShardRouting> shards, final ClusterState state, final ShardId shardId) { final CountDownLatch countDownLatch = new CountDownLatch(shards.size()); final Map<String, Engine.CommitId> commitIds = ConcurrentCollections.newConcurrentMap(); @@ -313,14 +318,18 @@ private final Map<ShardRouting, SyncedFlushResponse> shardResponses; private final String syncId; - /** failure constructor */ + /** + * failure constructor + */ SyncedFlushResult(String failureReason) { this.syncId = null; this.failureReason = failureReason; this.shardResponses = new HashMap<>(); } - /** success constructor */ + /** + * success constructor + */ SyncedFlushResult(String syncId, Map<ShardRouting, SyncedFlushResponse> shardResponses) { this.failureReason = null; this.shardResponses = shardResponses; @@ -339,7 +348,9 @@ return syncId; } - /** total number of shards for which a sync attempt was made */ + /** + * total number of shards for which a sync attempt was made + */ public int totalShards() { return shardResponses.size(); } @@ -477,7 +488,9 @@ static final class SyncedFlushResponse extends TransportResponse { - /** a non null value indicates a failure to sync flush.
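Note: sendPreSyncRequests above fans a commit request out to every started copy of the shard and gathers whatever arrives before the timeout; copies that error out or stay silent are simply absent from the result map and later count as failed for the sync. Its concurrency skeleton in plain JDK terms (sendAsync and the string types are stand-ins):

    CountDownLatch latch = new CountDownLatch(copies.size());
    Map<String, String> responses = new ConcurrentHashMap<>();
    for (String copy : copies) {
        sendAsync(copy,
                ok -> { responses.put(copy, ok); latch.countDown(); },
                err -> latch.countDown()); // count down on failure too, or await() hangs
    }
    if (latch.await(timeout.millis(), TimeUnit.MILLISECONDS) == false) {
        // timed out: continue with the partial map instead of blocking the flush
    }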
null means success + */ String failureReason; @@ -591,59 +604,29 @@ public class SyncedFlushService extends AbstractComponent { } - private class PreSyncedFlushTransportHandler extends BaseTransportRequestHandler<PreSyncedFlushRequest> { - - @Override - public PreSyncedFlushRequest newInstance() { - return new PreSyncedFlushRequest(); - } + private class PreSyncedFlushTransportHandler implements TransportRequestHandler<PreSyncedFlushRequest> { @Override public void messageReceived(PreSyncedFlushRequest request, TransportChannel channel) throws Exception { channel.sendResponse(performPreSyncedFlush(request)); } - - @Override - public String executor() { - return ThreadPool.Names.FLUSH; - } } - private class SyncedFlushTransportHandler extends BaseTransportRequestHandler<SyncedFlushRequest> { - - @Override - public SyncedFlushRequest newInstance() { - return new SyncedFlushRequest(); - } + private class SyncedFlushTransportHandler implements TransportRequestHandler<SyncedFlushRequest> { @Override public void messageReceived(SyncedFlushRequest request, TransportChannel channel) throws Exception { channel.sendResponse(performSyncedFlush(request)); } - - @Override - public String executor() { - return ThreadPool.Names.FLUSH; - } } - private class InFlightOpCountTransportHandler extends BaseTransportRequestHandler<InFlightOpsRequest> { - - @Override - public InFlightOpsRequest newInstance() { - return new InFlightOpsRequest(); - } + private class InFlightOpCountTransportHandler implements TransportRequestHandler<InFlightOpsRequest> { @Override public void messageReceived(InFlightOpsRequest request, TransportChannel channel) throws Exception { channel.sendResponse(performInFlightOps(request)); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } } diff --git a/src/main/java/org/elasticsearch/indices/TypeMissingException.java b/src/main/java/org/elasticsearch/indices/TypeMissingException.java index ae2830b9575..eb80c057a45 100644 --- a/src/main/java/org/elasticsearch/indices/TypeMissingException.java +++ b/src/main/java/org/elasticsearch/indices/TypeMissingException.java @@ -34,11 +34,6 @@ public class TypeMissingException extends IndexException { super(index, "type[" + Arrays.toString(types) + "] missing"); } - public TypeMissingException(Index index, String[] types, String message) { - super(index, "type[" + Arrays.toString(types) + "] missing: " + message); - } - - @Override public RestStatus status() { return RestStatus.NOT_FOUND; diff --git a/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java b/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java index aaa81a68bec..cf9db03463d 100644 --- a/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java +++ b/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java @@ -57,14 +57,14 @@ public abstract class CircuitBreakerService extends AbstractLifecycleComponent<CircuitBreakerService> { +public class IndicesFilterCache extends AbstractComponent implements QueryCache, Closeable { - private final ThreadPool threadPool; + public static final String INDICES_CACHE_QUERY_SIZE = "indices.cache.filter.size"; + public static final String INDICES_CACHE_QUERY_COUNT = "indices.cache.filter.count"; - private Cache cache; + private final LRUQueryCache cache; + private final ShardCoreKeyMap shardKeyMap = new ShardCoreKeyMap(); + private final Map<ShardId, Stats> shardStats = new ConcurrentHashMap<>(); + private volatile long sharedRamBytesUsed; - private volatile String size; - private volatile long sizeInBytes; - private volatile TimeValue expire; - private volatile int concurrencyLevel; - - private final TimeValue
diff --git a/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java b/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java
index aaa81a68bec..cf9db03463d 100644
--- a/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java
+++ b/src/main/java/org/elasticsearch/indices/breaker/CircuitBreakerService.java
@@ -57,14 +57,14 @@ public abstract class CircuitBreakerService extends AbstractLifecycleComponent {
diff --git a/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java b/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java
--- a/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java
+++ b/src/main/java/org/elasticsearch/indices/cache/filter/IndicesFilterCache.java
+public class IndicesFilterCache extends AbstractComponent implements QueryCache, Closeable {

-    private final ThreadPool threadPool;
+    public static final String INDICES_CACHE_QUERY_SIZE = "indices.cache.filter.size";
+    public static final String INDICES_CACHE_QUERY_COUNT = "indices.cache.filter.count";

-    private Cache<WeightedFilterCache.FilterCacheKey, DocIdSet> cache;
+    private final LRUQueryCache cache;
+    private final ShardCoreKeyMap shardKeyMap = new ShardCoreKeyMap();
+    private final Map<ShardId, Stats> shardStats = new ConcurrentHashMap<>();
+    private volatile long sharedRamBytesUsed;

-    private volatile String size;
-    private volatile long sizeInBytes;
-    private volatile TimeValue expire;
-    private volatile int concurrencyLevel;
-
-    private final TimeValue cleanInterval;
-    private final int minimumEntryWeight;
-
-    private final Set<Object> readersKeysToClean = ConcurrentCollections.newConcurrentSet();
-
-    private volatile boolean closed;
-
-    public static final String INDICES_CACHE_FILTER_SIZE = "indices.cache.filter.size";
-    public static final String INDICES_CACHE_FILTER_EXPIRE = "indices.cache.filter.expire";
-    public static final String INDICES_CACHE_FILTER_CONCURRENCY_LEVEL = "indices.cache.filter.concurrency_level";
-    public static final String INDICES_CACHE_FILTER_CLEAN_INTERVAL = "indices.cache.filter.clean_interval";
-    public static final String INDICES_CACHE_FILTER_MINIMUM_ENTRY_WEIGHT = "indices.cache.filter.minimum_entry_weight";
-
-    class ApplySettings implements NodeSettingsService.Listener {
-        @Override
-        public void onRefreshSettings(Settings settings) {
-            boolean replace = false;
-            String size = settings.get(INDICES_CACHE_FILTER_SIZE, IndicesFilterCache.this.size);
-            if (!size.equals(IndicesFilterCache.this.size)) {
-                logger.info("updating [{}] from [{}] to [{}]",
-                        INDICES_CACHE_FILTER_SIZE, IndicesFilterCache.this.size, size);
-                IndicesFilterCache.this.size = size;
-                replace = true;
-            }
-            TimeValue expire = settings.getAsTime(INDICES_CACHE_FILTER_EXPIRE, IndicesFilterCache.this.expire);
-            if (!Objects.equal(expire, IndicesFilterCache.this.expire)) {
-                logger.info("updating [{}] from [{}] to [{}]",
-                        INDICES_CACHE_FILTER_EXPIRE, IndicesFilterCache.this.expire, expire);
-                IndicesFilterCache.this.expire = expire;
-                replace = true;
-            }
-            final int concurrencyLevel = settings.getAsInt(INDICES_CACHE_FILTER_CONCURRENCY_LEVEL, IndicesFilterCache.this.concurrencyLevel);
-            if (concurrencyLevel <= 0) {
-                throw new ElasticsearchIllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel);
-            }
-            if (!Objects.equal(concurrencyLevel, IndicesFilterCache.this.concurrencyLevel)) {
-                logger.info("updating [{}] from [{}] to [{}]",
-                        INDICES_CACHE_FILTER_CONCURRENCY_LEVEL, IndicesFilterCache.this.concurrencyLevel, concurrencyLevel);
-                IndicesFilterCache.this.concurrencyLevel = concurrencyLevel;
-                replace = true;
-            }
-            if (replace) {
-                Cache<WeightedFilterCache.FilterCacheKey, DocIdSet> oldCache = IndicesFilterCache.this.cache;
-                computeSizeInBytes();
-                buildCache();
-                oldCache.invalidateAll();
-            }
-        }
-    }
+    // This is a hack for the fact that the close listener for the
+    // ShardCoreKeyMap will be called before onDocIdSetEviction
+    // See onDocIdSetEviction for more info
+    private final Map<Object, StatsAndCount> stats2 = new IdentityHashMap<>();

     @Inject
-    public IndicesFilterCache(Settings settings, ThreadPool threadPool, NodeSettingsService nodeSettingsService) {
+    public IndicesFilterCache(Settings settings) {
         super(settings);
-        this.threadPool = threadPool;
-        this.size = settings.get(INDICES_CACHE_FILTER_SIZE, "10%");
-        this.expire = settings.getAsTime(INDICES_CACHE_FILTER_EXPIRE, null);
-        this.minimumEntryWeight = settings.getAsInt(INDICES_CACHE_FILTER_MINIMUM_ENTRY_WEIGHT, 1024); // 1k per entry minimum
-        if (minimumEntryWeight <= 0) {
-            throw new ElasticsearchIllegalArgumentException("minimum_entry_weight must be > 0 but was: " + minimumEntryWeight);
+        final String sizeString = settings.get(INDICES_CACHE_QUERY_SIZE, "10%");
+        final ByteSizeValue size = MemorySizeValue.parseBytesSizeValueOrHeapRatio(sizeString);
+        final int count = settings.getAsInt(INDICES_CACHE_QUERY_COUNT, 100000);
+        logger.debug("using [node] weighted filter cache with size [{}], actual_size [{}], max filter count [{}]",
+                sizeString, size, count);
+        cache = new LRUQueryCache(count, size.bytes()) {
+
+            private Stats getStats(Object coreKey) {
+                final ShardId shardId = shardKeyMap.getShardId(coreKey);
+                if (shardId == null) {
+                    return null;
+                }
+                return shardStats.get(shardId);
+            }
+
+            private Stats getOrCreateStats(Object coreKey) {
+                final ShardId shardId = shardKeyMap.getShardId(coreKey);
+                Stats stats = shardStats.get(shardId);
+                if (stats == null) {
+                    stats = new Stats();
+                    shardStats.put(shardId, stats);
+                }
+                return stats;
+            }
+
+            // It's ok to not protect these callbacks by a lock since it is
+            // done in LRUQueryCache
+            @Override
+            protected void onClear() {
+                assert Thread.holdsLock(this);
+                super.onClear();
+                for (Stats stats : shardStats.values()) {
+                    // don't throw away hit/miss
+                    stats.cacheSize = 0;
+                    stats.ramBytesUsed = 0;
+                }
+                sharedRamBytesUsed = 0;
+            }
+
+            @Override
+            protected void onQueryCache(Query filter, long ramBytesUsed) {
+                assert Thread.holdsLock(this);
+                super.onQueryCache(filter, ramBytesUsed);
+                sharedRamBytesUsed += ramBytesUsed;
+            }
+
+            @Override
+            protected void onQueryEviction(Query filter, long ramBytesUsed) {
+                assert Thread.holdsLock(this);
+                super.onQueryEviction(filter, ramBytesUsed);
+                sharedRamBytesUsed -= ramBytesUsed;
+            }
+
+            @Override
+            protected void onDocIdSetCache(Object readerCoreKey, long ramBytesUsed) {
+                assert Thread.holdsLock(this);
+                super.onDocIdSetCache(readerCoreKey, ramBytesUsed);
+                final Stats shardStats = getOrCreateStats(readerCoreKey);
+                shardStats.cacheSize += 1;
+                shardStats.cacheCount += 1;
+                shardStats.ramBytesUsed += ramBytesUsed;
+
+                StatsAndCount statsAndCount = stats2.get(readerCoreKey);
+                if (statsAndCount == null) {
+                    statsAndCount = new StatsAndCount(shardStats);
+                    stats2.put(readerCoreKey, statsAndCount);
+                }
+                statsAndCount.count += 1;
+            }
+
+            @Override
+            protected void onDocIdSetEviction(Object readerCoreKey, int numEntries, long sumRamBytesUsed) {
+                assert Thread.holdsLock(this);
+                super.onDocIdSetEviction(readerCoreKey, numEntries, sumRamBytesUsed);
+                // We can't use ShardCoreKeyMap here because its core closed
+                // listener is called before the listener of the cache which
+                // triggers this eviction. So instead we use stats2 that
+                // we only evict when nothing is cached anymore on the segment
+                // instead of relying on close listeners
+                final StatsAndCount statsAndCount = stats2.get(readerCoreKey);
+                final Stats shardStats = statsAndCount.stats;
+                shardStats.cacheSize -= numEntries;
+                shardStats.ramBytesUsed -= sumRamBytesUsed;
+                statsAndCount.count -= numEntries;
+                if (statsAndCount.count == 0) {
+                    stats2.remove(readerCoreKey);
+                }
+            }
+
+            @Override
+            protected void onHit(Object readerCoreKey, Query filter) {
+                assert Thread.holdsLock(this);
+                super.onHit(readerCoreKey, filter);
+                final Stats shardStats = getStats(readerCoreKey);
+                shardStats.hitCount += 1;
+            }
+
+            @Override
+            protected void onMiss(Object readerCoreKey, Query filter) {
+                assert Thread.holdsLock(this);
+                super.onMiss(readerCoreKey, filter);
+                final Stats shardStats = getOrCreateStats(readerCoreKey);
+                shardStats.missCount += 1;
+            }
+        };
+        sharedRamBytesUsed = 0;
+    }
+
+    /** Get usage statistics for the given shard. */
+    public FilterCacheStats getStats(ShardId shard) {
+        final Map<ShardId, FilterCacheStats> stats = new HashMap<>();
+        for (Map.Entry<ShardId, Stats> entry : shardStats.entrySet()) {
+            stats.put(entry.getKey(), entry.getValue().toQueryCacheStats());
         }
-        this.cleanInterval = settings.getAsTime(INDICES_CACHE_FILTER_CLEAN_INTERVAL, TimeValue.timeValueSeconds(60));
-        // defaults to 4, but this is a busy map for all indices, increase it a bit
-        this.concurrencyLevel = settings.getAsInt(INDICES_CACHE_FILTER_CONCURRENCY_LEVEL, 16);
-        if (concurrencyLevel <= 0) {
-            throw new ElasticsearchIllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel);
+        FilterCacheStats shardStats = new FilterCacheStats();
+        FilterCacheStats info = stats.get(shard);
+        if (info == null) {
+            info = new FilterCacheStats();
         }
-        computeSizeInBytes();
-        buildCache();
-        logger.debug("using [node] weighted filter cache with size [{}], actual_size [{}], expire [{}], clean_interval [{}]",
-                size, new ByteSizeValue(sizeInBytes), expire, cleanInterval);
+        shardStats.add(info);

-        nodeSettingsService.addListener(new ApplySettings());
-        threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, new ReaderCleaner());
-    }
-
-    private void buildCache() {
-        CacheBuilder<WeightedFilterCache.FilterCacheKey, DocIdSet> cacheBuilder = CacheBuilder.newBuilder()
-                .removalListener(this)
-                .maximumWeight(sizeInBytes).weigher(new WeightedFilterCache.FilterCacheValueWeigher(minimumEntryWeight));
-
-        cacheBuilder.concurrencyLevel(this.concurrencyLevel);
-
-        if (expire != null) {
-            cacheBuilder.expireAfterAccess(expire.millis(), TimeUnit.MILLISECONDS);
+        // We also have some shared ram usage that we try to distribute to
+        // proportionally to their number of cache entries of each shard
+        long totalSize = 0;
+        for (FilterCacheStats s : stats.values()) {
+            totalSize += s.getCacheSize();
         }
-
-        cache = cacheBuilder.build();
-    }
-
-    private void computeSizeInBytes() {
-        this.sizeInBytes = MemorySizeValue.parseBytesSizeValueOrHeapRatio(size).bytes();
-    }
-
-    public void addReaderKeyToClean(Object readerKey) {
-        readersKeysToClean.add(readerKey);
-    }
-
-    public void close() {
-        closed = true;
-        cache.invalidateAll();
-    }
-
-    public Cache<WeightedFilterCache.FilterCacheKey, DocIdSet> cache() {
-        return this.cache;
+        final double weight = totalSize == 0
+                ? 1d / stats.size()
+                : shardStats.getCacheSize() / totalSize;
+        final long additionalRamBytesUsed = Math.round(weight * sharedRamBytesUsed);
+        shardStats.add(new FilterCacheStats(additionalRamBytesUsed, 0, 0, 0, 0));
+        return shardStats;
     }

     @Override
-    public void onRemoval(RemovalNotification<WeightedFilterCache.FilterCacheKey, DocIdSet> removalNotification) {
-        WeightedFilterCache.FilterCacheKey key = removalNotification.getKey();
-        if (key == null) {
-            return;
-        }
-        if (key.removalListener != null) {
-            key.removalListener.onRemoval(removalNotification);
+    public Weight doCache(Weight weight, QueryCachingPolicy policy) {
+        while (weight instanceof CachingWeightWrapper) {
+            weight = ((CachingWeightWrapper) weight).in;
         }
+        final Weight in = cache.doCache(weight, policy);
+        // We wrap the weight to track the readers it sees and map them with
+        // the shards they belong to
+        return new CachingWeightWrapper(in);
     }

-    /**
-     * The reason we need this class is because we need to clean all the filters that are associated
-     * with a reader. We don't want to do it every time a reader closes, since iterating over all the map
-     * is expensive. There doesn't seem to be a nicer way to do it (and maintaining a list per reader
-     * of the filters will cost more).
-     */
-    class ReaderCleaner implements Runnable {
+    private class CachingWeightWrapper extends Weight {

-        // this is thread safe since we only schedule the next cleanup once the current one is
-        // done, so no concurrent execution
-        private final ObjectOpenHashSet<Object> keys = ObjectOpenHashSet.newInstance();
+        private final Weight in;
+
+        protected CachingWeightWrapper(Weight in) {
+            super(in.getQuery());
+            this.in = in;
+        }

         @Override
-        public void run() {
-            if (closed) {
-                return;
-            }
-            if (readersKeysToClean.isEmpty()) {
-                schedule();
-                return;
-            }
-            try {
-                threadPool.executor(ThreadPool.Names.GENERIC).execute(new Runnable() {
-                    @Override
-                    public void run() {
-                        keys.clear();
-                        for (Iterator<Object> it = readersKeysToClean.iterator(); it.hasNext(); ) {
-                            keys.add(it.next());
-                            it.remove();
-                        }
-                        if (!keys.isEmpty()) {
-                            for (Iterator<WeightedFilterCache.FilterCacheKey> it = cache.asMap().keySet().iterator(); it.hasNext(); ) {
-                                WeightedFilterCache.FilterCacheKey filterCacheKey = it.next();
-                                if (keys.contains(filterCacheKey.readerKey())) {
-                                    // same as invalidate
-                                    it.remove();
-                                }
-                            }
-                        }
-                        cache.cleanUp();
-                        schedule();
-                        keys.clear();
-                    }
-                });
-            } catch (EsRejectedExecutionException ex) {
-                logger.debug("Can not run ReaderCleaner - execution rejected", ex);
-            }
+        public void extractTerms(Set<Term> terms) {
+            in.extractTerms(terms);
         }

-        private void schedule() {
-            try {
-                threadPool.schedule(cleanInterval, ThreadPool.Names.SAME, this);
-            } catch (EsRejectedExecutionException ex) {
-                logger.debug("Can not schedule ReaderCleaner - execution rejected", ex);
-            }
+        @Override
+        public Explanation explain(LeafReaderContext context, int doc) throws IOException {
+            shardKeyMap.add(context.reader());
+            return in.explain(context, doc);
+        }
+
+        @Override
+        public float getValueForNormalization() throws IOException {
+            return in.getValueForNormalization();
+        }
+
+        @Override
+        public void normalize(float norm, float topLevelBoost) {
+            in.normalize(norm, topLevelBoost);
+        }
+
+        @Override
+        public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException {
+            shardKeyMap.add(context.reader());
+            return in.scorer(context, acceptDocs);
         }
     }
-}
\ No newline at end of file
+
+    /** Clear all entries that belong to the given index. */
+    public void clearIndex(String index) {
+        final Set<Object> coreCacheKeys = shardKeyMap.getCoreKeysForIndex(index);
+        for (Object coreKey : coreCacheKeys) {
+            cache.clearCoreCacheKey(coreKey);
+        }
+
+        // This cache stores two things: filters, and doc id sets. Calling
+        // clear only removes the doc id sets, but if we reach the situation
+        // that the cache does not contain any DocIdSet anymore, then it
+        // probably means that the user wanted to remove everything.
+        if (cache.getCacheSize() == 0) {
+            cache.clear();
+        }
+    }
+
+    @Override
+    public void close() {
+        assert shardKeyMap.size() == 0 : shardKeyMap.size();
+        assert shardStats.isEmpty();
+        assert stats2.isEmpty() : stats2;
+        cache.clear();
+    }
+
+    private static class Stats implements Cloneable {
+
+        volatile long ramBytesUsed;
+        volatile long hitCount;
+        volatile long missCount;
+        volatile long cacheCount;
+        volatile long cacheSize;
+
+        FilterCacheStats toQueryCacheStats() {
+            return new FilterCacheStats(ramBytesUsed, hitCount, missCount, cacheCount, cacheSize);
+        }
+    }
+
+    private static class StatsAndCount {
+        int count;
+        final Stats stats;
+
+        StatsAndCount(Stats stats) {
+            this.stats = stats;
+            this.count = 0;
+        }
+    }
+
+    private boolean empty(Stats stats) {
+        if (stats == null) {
+            return true;
+        }
+        return stats.cacheSize == 0 && stats.ramBytesUsed == 0;
+    }
+
+    public void onClose(ShardId shardId) {
+        assert empty(shardStats.get(shardId));
+        shardStats.remove(shardId);
+    }
+}
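A note on the shared-RAM attribution in getStats(ShardId) above: LRUQueryCache tracks the memory of the cached filters themselves globally (sharedRamBytesUsed), not per shard, so the stats code spreads that pool over shards in proportion to their cache-entry counts, falling back to an even split when the cache holds no entries. A self-contained sketch of the same arithmetic; the helper name and parameters are illustrative, not part of the class:

    // Attribute a node-wide memory pool to one shard, proportionally to its
    // share of the cache entries, mirroring getStats(ShardId) above.
    static long attributeSharedRam(long sharedRamBytesUsed, long shardCacheSize,
                                   long totalCacheSize, int numShards) {
        final double weight = totalCacheSize == 0
                ? 1.0 / numShards                            // empty cache: split evenly
                : (double) shardCacheSize / totalCacheSize;  // otherwise: proportional
        return Math.round(weight * sharedRamBytesUsed);
    }

For example, a shard holding 25 of 100 cached entries on a node whose cached filters use 40 MB would be charged 10 MB. Note the explicit cast to double in the sketch: with two long operands the division would truncate to an integer ratio.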
diff --git a/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java b/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java
index 5daf3682a1f..1f7486de4ac 100644
--- a/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java
+++ b/src/main/java/org/elasticsearch/indices/cache/query/IndicesQueryCache.java
@@ -31,7 +31,6 @@ import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.RamUsageEstimator;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -120,7 +119,7 @@ public class IndicesQueryCache extends AbstractComponent implements RemovalListener {
         // defaults to 4, but this is a busy map for all indices, increase it a bit by default
         this.concurrencyLevel = settings.getAsInt(INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, 16);
         if (concurrencyLevel <= 0) {
-            throw new ElasticsearchIllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel);
+            throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel);
         }
         buildCache();
diff --git a/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
index b10ff5d5bea..ca3b7e73744 100644
--- a/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
+++ b/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
@@ -25,8 +25,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.google.common.base.Predicate;
 import com.google.common.collect.Lists;
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchIllegalStateException;
-import org.elasticsearch.ExceptionsHelper;
+
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
@@ -133,17 +132,17 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent
-            throw new ElasticsearchIllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel);
+            throw new IllegalArgumentException("concurrency_level must be > 0 but was: " + concurrencyLevel);
         }
         cacheBuilder.concurrencyLevel(concurrencyLevel);
         if (expire != null && expire.millis() > 0) {
diff --git a/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java b/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java
index 14b94901736..c2f282eb396 100644
--- a/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java
+++ b/src/main/java/org/elasticsearch/indices/memory/IndexingMemoryController.java
@@ -125,19 +125,19 @@ public class IndexingMemoryController extends AbstractLifecycleComponent {
-
-        @Override
-        public StartRecoveryRequest newInstance() {
-            return new StartRecoveryRequest();
-        }
-
-        @Override
-        public String executor() {
-            return ThreadPool.Names.GENERIC;
-        }
-
+    class StartRecoveryTransportRequestHandler implements TransportRequestHandler<StartRecoveryRequest> {
         @Override
         public void messageReceived(final StartRecoveryRequest request, final TransportChannel channel) throws Exception {
             RecoveryResponse response = recover(request);
diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java b/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
index 54e11c55556..6a429974f69 100644
--- a/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
+++ b/src/main/java/org/elasticsearch/indices/recovery/RecoverySourceHandler.java
@@ -146,7 +146,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler {
      * and releasing the snapshot once all 3 phases of recovery are complete
      */
     @Override
-    public void phase1(final SnapshotIndexCommit snapshot) throws ElasticsearchException {
+    public void phase1(final SnapshotIndexCommit snapshot) {
         cancellableThreads.checkForCancel();
         // Total size of segment files that are recovered
         long totalSize = 0;
@@ -428,7 +428,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler {
      * of the translog and releasing it once all 3 phases of recovery are complete
      */
     @Override
-    public void phase2(Translog.Snapshot snapshot) throws ElasticsearchException {
+    public void phase2(Translog.Snapshot snapshot) {
         if (shard.state() == IndexShardState.CLOSED) {
             throw new IndexShardClosedException(request.shardId());
         }
@@ -479,7 +479,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler {
      * three phases are released.
      */
     @Override
-    public void phase3(Translog.Snapshot snapshot) throws ElasticsearchException {
+    public void phase3(Translog.Snapshot snapshot) {
         if (shard.state() == IndexShardState.CLOSED) {
             throw new IndexShardClosedException(request.shardId());
         }
@@ -567,7 +567,7 @@ public class RecoverySourceHandler implements Engine.RecoveryHandler {
             }
         };
         for (DocumentMapper documentMapper : documentMappersToUpdate) {
-            mappingUpdatedAction.updateMappingOnMaster(indexService.index().getName(), indexService.indexUUID(), documentMapper.type(), documentMapper.mapping(), listener);
+            mappingUpdatedAction.updateMappingOnMaster(indexService.index().getName(), documentMapper.type(), documentMapper.mapping(), recoverySettings.internalActionTimeout(), listener);
         }
         cancellableThreads.execute(new Interruptable() {
             @Override
diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java
index 2e9f11efa82..c2af0657bc5 100644
--- a/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java
+++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryState.java
@@ -20,8 +20,6 @@ package org.elasticsearch.indices.recovery;
 
 import com.google.common.collect.ImmutableList;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
-import org.elasticsearch.ElasticsearchIllegalStateException;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RestoreSource;
 import org.elasticsearch.common.Nullable;
@@ -82,9 +80,9 @@ public class RecoveryState implements ToXContent, Streamable {
             return id;
         }
 
-        public static Stage fromId(byte id) throws ElasticsearchIllegalArgumentException {
+        public static Stage fromId(byte id) {
             if (id < 0 || id >= STAGES.length) {
-                throw new ElasticsearchIllegalArgumentException("No mapping for id [" + id + "]");
+                throw new IllegalArgumentException("No mapping for id [" + id + "]");
             }
             return STAGES[id];
         }
@@ -115,9 +113,9 @@ public class RecoveryState implements ToXContent, Streamable {
             return id;
         }
 
-        public static Type fromId(byte id) throws ElasticsearchIllegalArgumentException {
+        public static Type fromId(byte id) {
             if (id < 0 || id >= TYPES.length) {
-                throw new ElasticsearchIllegalArgumentException("No mapping for id [" + id + "]");
+                throw new IllegalArgumentException("No mapping for id [" + id + "]");
             }
             return TYPES[id];
         }
@@ -170,7 +168,7 @@ public class RecoveryState implements ToXContent, Streamable {
 
     private void validateAndSetStage(Stage expected, Stage next) {
         if (stage != expected) {
-            throw new ElasticsearchIllegalStateException("can't move recovery to stage [" + next + "]. current stage: ["
+            throw new IllegalStateException("can't move recovery to stage [" + next + "]. current stage: ["
                     + stage + "] (expected [" + expected + "])");
         }
         stage = next;
@@ -209,7 +207,7 @@ public class RecoveryState implements ToXContent, Streamable {
                 getTimer().stop();
                 break;
             default:
-                throw new ElasticsearchIllegalArgumentException("unknown RecoveryState.Stage [" + stage + "]");
+                throw new IllegalArgumentException("unknown RecoveryState.Stage [" + stage + "]");
         }
         return this;
     }
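Both fromId methods above follow the usual wire-decoding pattern for enums: a byte written by the sender indexes into a cached values() array, and an out-of-range id now fails with the JDK's IllegalArgumentException instead of the Elasticsearch-specific one. A generic, runnable sketch of the pattern; the constants here are placeholders, not the actual RecoveryState stages:

    enum Stage {
        INIT, INDEX, TRANSLOG, FINALIZE, DONE; // placeholder constants

        // Cache the array once: values() clones it on every call.
        private static final Stage[] STAGES = values();

        public byte id() {
            return (byte) ordinal();
        }

        public static Stage fromId(byte id) {
            if (id < 0 || id >= STAGES.length) {
                throw new IllegalArgumentException("No mapping for id [" + id + "]");
            }
            return STAGES[id];
        }
    }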
diff --git a/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
index 4382f6bdb29..fcfc9722a03 100644
--- a/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
+++ b/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
@@ -95,12 +95,12 @@ public class RecoveryTarget extends AbstractComponent {
         this.clusterService = clusterService;
         this.onGoingRecoveries = new RecoveriesCollection(logger, threadPool);

-        transportService.registerHandler(Actions.FILES_INFO, new FilesInfoRequestHandler());
-        transportService.registerHandler(Actions.FILE_CHUNK, new FileChunkTransportRequestHandler());
-        transportService.registerHandler(Actions.CLEAN_FILES, new CleanFilesRequestHandler());
-        transportService.registerHandler(Actions.PREPARE_TRANSLOG, new PrepareForTranslogOperationsRequestHandler());
-        transportService.registerHandler(Actions.TRANSLOG_OPS, new TranslogOperationsRequestHandler());
-        transportService.registerHandler(Actions.FINALIZE, new FinalizeRecoveryRequestHandler());
+        transportService.registerRequestHandler(Actions.FILES_INFO, RecoveryFilesInfoRequest.class, ThreadPool.Names.GENERIC, new FilesInfoRequestHandler());
+        transportService.registerRequestHandler(Actions.FILE_CHUNK, RecoveryFileChunkRequest.class, ThreadPool.Names.GENERIC, new FileChunkTransportRequestHandler());
+        transportService.registerRequestHandler(Actions.CLEAN_FILES, RecoveryCleanFilesRequest.class, ThreadPool.Names.GENERIC, new CleanFilesRequestHandler());
+        transportService.registerRequestHandler(Actions.PREPARE_TRANSLOG, RecoveryPrepareForTranslogOperationsRequest.class, ThreadPool.Names.GENERIC, new PrepareForTranslogOperationsRequestHandler());
+        transportService.registerRequestHandler(Actions.TRANSLOG_OPS, RecoveryTranslogOperationsRequest.class, ThreadPool.Names.GENERIC, new TranslogOperationsRequestHandler());
+        transportService.registerRequestHandler(Actions.FINALIZE, RecoveryFinalizeRecoveryRequest.class, ThreadPool.Names.GENERIC, new FinalizeRecoveryRequestHandler());

         indicesLifecycle.addListener(new IndicesLifecycle.Listener() {
             @Override
@@ -267,17 +267,7 @@ public class RecoveryTarget extends AbstractComponent {
         void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure);
     }

-    class PrepareForTranslogOperationsRequestHandler extends BaseTransportRequestHandler<RecoveryPrepareForTranslogOperationsRequest> {
-
-        @Override
-        public RecoveryPrepareForTranslogOperationsRequest newInstance() {
-            return new RecoveryPrepareForTranslogOperationsRequest();
-        }
-
-        @Override
-        public String executor() {
-            return ThreadPool.Names.GENERIC;
-        }
+    class PrepareForTranslogOperationsRequestHandler implements TransportRequestHandler<RecoveryPrepareForTranslogOperationsRequest> {

         @Override
         public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception {
@@ -290,17 +280,7 @@ public class RecoveryTarget extends AbstractComponent {
         }
     }

-    class FinalizeRecoveryRequestHandler extends BaseTransportRequestHandler<RecoveryFinalizeRecoveryRequest> {
-
-        @Override
-        public RecoveryFinalizeRecoveryRequest newInstance() {
-            return new RecoveryFinalizeRecoveryRequest();
-        }
-
-        @Override
-        public String executor() {
-            return ThreadPool.Names.GENERIC;
-        }
+    class FinalizeRecoveryRequestHandler implements TransportRequestHandler<RecoveryFinalizeRecoveryRequest> {

         @Override
         public void messageReceived(RecoveryFinalizeRecoveryRequest request, TransportChannel channel) throws Exception {
@@ -312,18 +292,7 @@ public class RecoveryTarget extends AbstractComponent {
         }
     }

-    class TranslogOperationsRequestHandler extends BaseTransportRequestHandler<RecoveryTranslogOperationsRequest> {
-
-
-        @Override
-        public RecoveryTranslogOperationsRequest newInstance() {
-            return new RecoveryTranslogOperationsRequest();
-        }
-
-        @Override
-        public String executor() {
-            return ThreadPool.Names.GENERIC;
-        }
+    class TranslogOperationsRequestHandler implements TransportRequestHandler<RecoveryTranslogOperationsRequest> {

         @Override
         public void messageReceived(RecoveryTranslogOperationsRequest request, TransportChannel channel) throws Exception {
@@ -339,17 +308,7 @@ public class RecoveryTarget extends AbstractComponent {
         }
     }

-    class FilesInfoRequestHandler extends BaseTransportRequestHandler<RecoveryFilesInfoRequest> {
-
-        @Override
-        public RecoveryFilesInfoRequest newInstance() {
-            return new RecoveryFilesInfoRequest();
-        }
-
-        @Override
-        public String executor() {
-            return ThreadPool.Names.GENERIC;
-        }
+    class FilesInfoRequestHandler implements TransportRequestHandler<RecoveryFilesInfoRequest> {

         @Override
         public void messageReceived(RecoveryFilesInfoRequest request, TransportChannel channel) throws Exception {
@@ -370,17 +329,7 @@ public class RecoveryTarget extends AbstractComponent {
         }
     }

-    class CleanFilesRequestHandler extends BaseTransportRequestHandler<RecoveryCleanFilesRequest> {
-
-        @Override
-        public RecoveryCleanFilesRequest newInstance() {
-            return new RecoveryCleanFilesRequest();
-        }
-
-        @Override
-        public String executor() {
-            return ThreadPool.Names.GENERIC;
-        }
+    class CleanFilesRequestHandler implements TransportRequestHandler<RecoveryCleanFilesRequest> {

         @Override
         public void messageReceived(RecoveryCleanFilesRequest request, TransportChannel channel) throws Exception {
@@ -419,21 +368,11 @@ public class RecoveryTarget extends AbstractComponent {
         }
     }

-    class FileChunkTransportRequestHandler extends BaseTransportRequestHandler<RecoveryFileChunkRequest> {
+    class FileChunkTransportRequestHandler implements TransportRequestHandler<RecoveryFileChunkRequest> {

         // How many bytes we've copied since we last called RateLimiter.pause
         final AtomicLong bytesSinceLastPause = new AtomicLong();

-        @Override
-        public RecoveryFileChunkRequest newInstance() {
-            return new RecoveryFileChunkRequest();
-        }
-
-        @Override
-        public String executor() {
-            return ThreadPool.Names.GENERIC;
-        }
-
         @Override
         public void messageReceived(final RecoveryFileChunkRequest request, TransportChannel channel) throws Exception {
             try (RecoveriesCollection.StatusRef statusRef = onGoingRecoveries.getStatusSafe(request.recoveryId(), request.shardId())) {
diff --git a/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java b/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java
index b347f662a1e..bacd084d058 100644
--- a/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java
+++ b/src/main/java/org/elasticsearch/indices/recovery/SharedFSRecoverySourceHandler.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.indices.recovery;

 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchIllegalStateException;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
 import org.elasticsearch.common.logging.ESLogger;
@@ -46,12 +45,12 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler {
     }

     @Override
-    public void phase1(SnapshotIndexCommit snapshot) throws ElasticsearchException {
+    public void phase1(SnapshotIndexCommit snapshot) {
         if (request.recoveryType() == RecoveryState.Type.RELOCATION && shard.routingEntry().primary()) {
             // here we simply fail the primary shard since we can't move them (have 2 writers open at the same time)
             // by failing the shard we play safe and just go through the entire reallocation procedure of the primary
             // it would be ideal to make sure we flushed the translog here but that is not possible in the current design.
-            ElasticsearchIllegalStateException exception = new ElasticsearchIllegalStateException("Can't relocate primary - failing");
+            IllegalStateException exception = new IllegalStateException("Can't relocate primary - failing");
             shard.failShard("primary_relocation", exception);
             throw exception;
         }
@@ -60,7 +59,7 @@ public class SharedFSRecoverySourceHandler extends RecoverySourceHandler {

     @Override
-    protected int sendSnapshot(Translog.Snapshot snapshot) throws ElasticsearchException {
+    protected int sendSnapshot(Translog.Snapshot snapshot) {
         logger.trace("{} recovery [phase3] to {}: skipping transaction log operations for file sync", shard.shardId(), request.targetNode());
         return 0;
     }
diff --git a/src/main/java/org/elasticsearch/indices/store/IndicesStore.java b/src/main/java/org/elasticsearch/indices/store/IndicesStore.java
index 643f91172b2..36c9be862ee 100644
--- a/src/main/java/org/elasticsearch/indices/store/IndicesStore.java
+++ b/src/main/java/org/elasticsearch/indices/store/IndicesStore.java
@@ -89,8 +89,6 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListener {
         }
     }

-    private final NodeEnvironment nodeEnv;
-
     private final NodeSettingsService nodeSettingsService;

     private final IndicesService indicesService;
@@ -107,15 +105,14 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListener {
     private TimeValue deleteShardTimeout;

     @Inject
-    public IndicesStore(Settings settings, NodeEnvironment nodeEnv, NodeSettingsService nodeSettingsService, IndicesService indicesService,
+    public IndicesStore(Settings settings, NodeSettingsService nodeSettingsService, IndicesService indicesService,
                         ClusterService clusterService, TransportService transportService) {
         super(settings);
-        this.nodeEnv = nodeEnv;
         this.nodeSettingsService = nodeSettingsService;
         this.indicesService = indicesService;
         this.clusterService = clusterService;
         this.transportService = transportService;
-        transportService.registerHandler(ACTION_SHARD_EXISTS, new ShardActiveRequestHandler());
+        transportService.registerRequestHandler(ACTION_SHARD_EXISTS, ShardActiveRequest.class, ThreadPool.Names.SAME, new ShardActiveRequestHandler());

         // we don't limit by default (we default to CMS's auto throttle instead):
         this.rateLimitingType = settings.get("indices.store.throttle.type", StoreRateLimiting.Type.NONE.name());
@@ -133,7 +130,6 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListener {
     IndicesStore() {
         super(ImmutableSettings.EMPTY);
-        nodeEnv = null;
         nodeSettingsService = null;
         indicesService = null;
         this.clusterService = null;
@@ -328,17 +324,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListener {
     }

-    private class ShardActiveRequestHandler extends BaseTransportRequestHandler<ShardActiveRequest> {
-
-        @Override
-        public ShardActiveRequest newInstance() {
-            return new ShardActiveRequest();
-        }
-
-        @Override
-        public String executor() {
-            return ThreadPool.Names.SAME;
-        }
+    private class ShardActiveRequestHandler implements TransportRequestHandler<ShardActiveRequest> {

         @Override
         public void messageReceived(final ShardActiveRequest request, final TransportChannel channel) throws Exception {
@@ -346,56 +332,57 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListener {
             // make sure shard is really there before register cluster state observer
             if (indexShard == null) {
                 channel.sendResponse(new ShardActiveResponse(false, clusterService.localNode()));
-            }
-            // create observer here. we need to register it here because we need to capture the current cluster state
-            // which will then be compared to the one that is applied when we call waitForNextChange(). if we create it
-            // later we might miss an update and wait forever in case no new cluster state comes in.
-            // in general, using a cluster state observer here is a workaround for the fact that we cannot listen on shard state changes explicitly.
-            // instead we wait for the cluster state changes because we know any shard state change will trigger or be
-            // triggered by a cluster state change.
-            ClusterStateObserver observer = new ClusterStateObserver(clusterService, request.timeout, logger);
-            // check if shard is active. if so, all is good
-            boolean shardActive = shardActive(indexShard);
-            if (shardActive) {
-                channel.sendResponse(new ShardActiveResponse(true, clusterService.localNode()));
             } else {
-                // shard is not active, might be POST_RECOVERY so check if cluster state changed inbetween or wait for next change
-                observer.waitForNextChange(new ClusterStateObserver.Listener() {
-                    @Override
-                    public void onNewClusterState(ClusterState state) {
-                        sendResult(shardActive(getShard(request)));
-                    }
-
-                    @Override
-                    public void onClusterServiceClose() {
-                        sendResult(false);
-                    }
-
-                    @Override
-                    public void onTimeout(TimeValue timeout) {
-                        sendResult(shardActive(getShard(request)));
-                    }
-
-                    public void sendResult(boolean shardActive) {
-                        try {
-                            channel.sendResponse(new ShardActiveResponse(shardActive, clusterService.localNode()));
-                        } catch (IOException e) {
-                            logger.error("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", e, request.shardId);
-                        } catch (EsRejectedExecutionException e) {
-                            logger.error("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", e, request.shardId);
+                // create observer here. we need to register it here because we need to capture the current cluster state
+                // which will then be compared to the one that is applied when we call waitForNextChange(). if we create it
+                // later we might miss an update and wait forever in case no new cluster state comes in.
+                // in general, using a cluster state observer here is a workaround for the fact that we cannot listen on shard state changes explicitly.
+                // instead we wait for the cluster state changes because we know any shard state change will trigger or be
+                // triggered by a cluster state change.
+                ClusterStateObserver observer = new ClusterStateObserver(clusterService, request.timeout, logger);
+                // check if shard is active. if so, all is good
+                boolean shardActive = shardActive(indexShard);
+                if (shardActive) {
+                    channel.sendResponse(new ShardActiveResponse(true, clusterService.localNode()));
+                } else {
+                    // shard is not active, might be POST_RECOVERY so check if cluster state changed inbetween or wait for next change
+                    observer.waitForNextChange(new ClusterStateObserver.Listener() {
+                        @Override
+                        public void onNewClusterState(ClusterState state) {
+                            sendResult(shardActive(getShard(request)));
                         }
-                    }
-                }, new ClusterStateObserver.ValidationPredicate() {
-                    @Override
-                    protected boolean validate(ClusterState newState) {
-                        // the shard is not there in which case we want to send back a false (shard is not active), so the cluster state listener must be notified
-                        // or the shard is active in which case we want to send back that the shard is active
-                        // here we could also evaluate the cluster state and get the information from there. we
-                        // don't do it because we would have to write another method for this that would have the same effect
-                        IndexShard indexShard = getShard(request);
-                        return indexShard == null || shardActive(indexShard);
-                    }
-                });
+
+                        @Override
+                        public void onClusterServiceClose() {
+                            sendResult(false);
+                        }
+
+                        @Override
+                        public void onTimeout(TimeValue timeout) {
+                            sendResult(shardActive(getShard(request)));
+                        }
+
+                        public void sendResult(boolean shardActive) {
+                            try {
+                                channel.sendResponse(new ShardActiveResponse(shardActive, clusterService.localNode()));
+                            } catch (IOException e) {
+                                logger.error("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", e, request.shardId);
+                            } catch (EsRejectedExecutionException e) {
+                                logger.error("failed send response for shard active while trying to delete shard {} - shard will probably not be removed", e, request.shardId);
+                            }
+                        }
+                    }, new ClusterStateObserver.ValidationPredicate() {
+                        @Override
+                        protected boolean validate(ClusterState newState) {
+                            // the shard is not there in which case we want to send back a false (shard is not active), so the cluster state listener must be notified
+                            // or the shard is active in which case we want to send back that the shard is active
+                            // here we could also evaluate the cluster state and get the information from there. we
+                            // don't do it because we would have to write another method for this that would have the same effect
+                            IndexShard indexShard = getShard(request);
+                            return indexShard == null || shardActive(indexShard);
+                        }
+                    });
+                }
             }
         }
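The re-indented messageReceived above is worth reading carefully, because its ordering is deliberate: the ClusterStateObserver is constructed before the shard-active check so that it captures the state the check runs against; if the shard then turns out not to be active, waitForNextChange is guaranteed to observe any update that arrived after the capture. A condensed sketch of that register-before-check ordering; respond() and getShard() are illustrative stand-ins (the real code defines a sendResult helper on the listener), and the validation predicate from the diff is omitted for brevity:

    // 1. Capture the current state first, so no update can slip between
    //    the check below and the wait.
    ClusterStateObserver observer = new ClusterStateObserver(clusterService, timeout, logger);
    if (shardActive(indexShard)) {
        respond(true);                        // 2. fast path: shard already active
    } else {
        observer.waitForNextChange(new ClusterStateObserver.Listener() {  // 3. wait
            @Override
            public void onNewClusterState(ClusterState state) {
                respond(shardActive(getShard()));
            }
            @Override
            public void onClusterServiceClose() {
                respond(false);
            }
            @Override
            public void onTimeout(TimeValue timeout) {
                respond(shardActive(getShard()));
            }
        });
    }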
diff --git a/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java b/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java
index 1f0bf84a2f4..f95cfd8ece9 100644
--- a/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java
+++ b/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetaData.java
@@ -43,6 +43,7 @@ import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.shard.ShardPath;
+import org.elasticsearch.index.store.IndexStoreModule;
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.store.StoreFileMetaData;
 import org.elasticsearch.indices.IndicesService;
@@ -70,7 +71,8 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesOperationAction {
     @Inject
     public TransportNodesListShardStoreMetaData(Settings settings, ClusterName clusterName, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
                                                 IndicesService indicesService, NodeEnvironment nodeEnv, ActionFilters actionFilters) {
-        super(settings, ACTION_NAME, clusterName, threadPool, clusterService, transportService, actionFilters);
+        super(settings, ACTION_NAME, clusterName, threadPool, clusterService, transportService, actionFilters,
+                Request.class, NodeRequest.class, ThreadPool.Names.GENERIC);
         this.indicesService = indicesService;
         this.nodeEnv = nodeEnv;
     }
@@ -79,21 +81,6 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesOperationAction {
         return execute(new Request(shardId, onlyUnallocated, nodesIds).timeout(timeout));
     }

-    @Override
-    protected String executor() {
-        return ThreadPool.Names.GENERIC;
-    }
-
-    @Override
-    protected Request newRequestInstance() {
-        return new Request();
-    }
-
-    @Override
-    protected NodeRequest newNodeRequest() {
-        return new NodeRequest();
-    }
-
     @Override
     protected NodeRequest newNodeRequest(String nodeId, Request request) {
         return new NodeRequest(nodeId, request);
@@ -123,7 +110,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesOperationAction {
     }

     @Override
-    protected NodeStoreFilesMetaData nodeOperation(NodeRequest request) throws ElasticsearchException {
+    protected NodeStoreFilesMetaData nodeOperation(NodeRequest request) {
         if (request.unallocated) {
             IndexService indexService = indicesService.indexService(request.shardId.index().name());
             if (indexService == null) {
@@ -168,7 +155,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesOperationAction {
         if (metaData == null) {
             return new StoreFilesMetaData(false, shardId, ImmutableMap.of());
         }
-        String storeType = metaData.settings().get("index.store.type", "fs");
+        String storeType = metaData.settings().get(IndexStoreModule.STORE_TYPE, "fs");
         if (!storeType.contains("fs")) {
             return new StoreFilesMetaData(false, shardId, ImmutableMap.of());
         }
diff --git a/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java b/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java
index f70a74521b3..9adbeb88280 100644
--- a/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java
+++ b/src/main/java/org/elasticsearch/indices/ttl/IndicesTTLService.java
@@ -91,12 +91,12 @@ public class IndicesTTLService extends AbstractLifecycleComponent {
     }

     @Override
-    protected void doStart() throws ElasticsearchException {
+    protected void doStart() {
         jvmMonitorService.start();
     }

     @Override
-    protected void doStop() throws ElasticsearchException {
+    protected void doStop() {
         jvmMonitorService.stop();
     }

     @Override
-    protected void doClose() throws ElasticsearchException {
+    protected void doClose() {
         jvmMonitorService.close();
     }
 }
diff --git a/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java b/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java
index d9b4f5c2462..861297f3482 100644
--- a/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java
+++ b/src/main/java/org/elasticsearch/monitor/jvm/HotThreads.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.monitor.jvm;

 import org.apache.lucene.util.CollectionUtil;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.joda.FormatDateTimeFormatter;
 import org.elasticsearch.common.joda.Joda;
 import org.elasticsearch.common.unit.TimeValue;
@@ -75,7 +74,7 @@ public class HotThreads {
         if ("cpu".equals(type) || "wait".equals(type) || "block".equals(type)) {
             this.type = type;
         } else {
-            throw new ElasticsearchIllegalArgumentException("type not supported [" + type + "]");
+            throw new IllegalArgumentException("type not supported [" + type + "]");
         }
         return this;
     }
diff --git a/src/main/java/org/elasticsearch/monitor/jvm/JvmMonitorService.java b/src/main/java/org/elasticsearch/monitor/jvm/JvmMonitorService.java
index 16bcd2077b5..64b0eeeefe3 100644
--- a/src/main/java/org/elasticsearch/monitor/jvm/JvmMonitorService.java
+++ b/src/main/java/org/elasticsearch/monitor/jvm/JvmMonitorService.java
@@ -109,7 +109,7 @@ public class JvmMonitorService extends AbstractLifecycleComponent {
         tuple = InternalSettingsPreparer.prepareSettings(pSettings, loadConfigSettings);
@@ -143,9 +142,8 @@ public class Node implements Releasable {

         if (logger.isDebugEnabled()) {
             Environment env = tuple.v2();
-            logger.debug("using home [{}], config [{}], data [{}], logs [{}], work [{}], plugins [{}]",
-                    env.homeFile(), env.configFile(), Arrays.toString(env.dataFiles()), env.logsFile(),
-                    env.workFile(), env.pluginsFile());
+            logger.debug("using home [{}], config [{}], data [{}], logs [{}], plugins [{}]",
+                    env.homeFile(), env.configFile(), Arrays.toString(env.dataFiles()), env.logsFile(), env.pluginsFile());
         }

         this.pluginsService = new PluginsService(tuple.v1(), tuple.v2());
@@ -158,7 +156,7 @@ public class Node implements Releasable {
         try {
             nodeEnvironment = new NodeEnvironment(this.settings, this.environment);
         } catch (IOException ex) {
-            throw new ElasticsearchIllegalStateException("Failed to created node environment", ex);
+            throw new IllegalStateException("Failed to created node environment", ex);
         }

         boolean success = false;
@@ -242,7 +240,7 @@ public class Node implements Releasable {
             injector.getInstance(plugin).start();
         }

-        injector.getInstance(MappingUpdatedAction.class).start();
+        injector.getInstance(MappingUpdatedAction.class).setClient(client);
         injector.getInstance(IndicesService.class).start();
         injector.getInstance(IndexingMemoryController.class).start();
         injector.getInstance(IndicesClusterStateService.class).start();
@@ -285,7 +283,6 @@ public class Node implements Releasable {
             injector.getInstance(HttpServer.class).stop();
         }

-        injector.getInstance(MappingUpdatedAction.class).stop();
         injector.getInstance(RiversManager.class).stop();
         injector.getInstance(SnapshotsService.class).stop();
diff --git a/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java b/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java
index 618703af3aa..a074d975c04 100644
--- a/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java
+++ b/src/main/java/org/elasticsearch/percolator/MultiDocumentPercolatorIndex.java
@@ -71,7 +71,9 @@ class MultiDocumentPercolatorIndex implements PercolatorIndex {
         try {
             MultiReader mReader = new MultiReader(memoryIndices, true);
             LeafReader slowReader = SlowCompositeReaderWrapper.wrap(mReader);
-            DocSearcher docSearcher = new DocSearcher(new IndexSearcher(slowReader), rootDocMemoryIndex);
+            final IndexSearcher slowSearcher = new IndexSearcher(slowReader);
+            slowSearcher.setQueryCache(null);
+            DocSearcher docSearcher = new DocSearcher(slowSearcher, rootDocMemoryIndex);
             context.initialize(docSearcher, parsedDocument);
         } catch (IOException e) {
             throw new ElasticsearchException("Failed to create index for percolator with nested document ", e);
@@ -107,7 +109,7 @@ class MultiDocumentPercolatorIndex implements PercolatorIndex {
         }

         @Override
-        public void close() throws ElasticsearchException {
+        public void close() {
             try {
                 this.reader().close();
                 rootDocMemoryIndex.reset();
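Disabling the query cache on the percolator's throwaway searcher is a small but deliberate change: the MemoryIndex-backed reader lives for a single percolate request, so caching doc-id sets against its core key would only add bookkeeping and churn in the node-wide cache introduced earlier in this patch. In isolation, the Lucene API looks like this, where reader is any short-lived IndexReader:

    // A one-shot searcher over a short-lived reader should not populate the
    // shared query cache; passing null opts this searcher out entirely.
    IndexSearcher searcher = new IndexSearcher(reader);
    searcher.setQueryCache(null);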
diff --git a/src/main/java/org/elasticsearch/percolator/PercolateContext.java b/src/main/java/org/elasticsearch/percolator/PercolateContext.java
index b5dfc37bcd6..d752052e829 100644
--- a/src/main/java/org/elasticsearch/percolator/PercolateContext.java
+++ b/src/main/java/org/elasticsearch/percolator/PercolateContext.java
@@ -439,11 +439,6 @@ public class PercolateContext extends SearchContext {
         return bigArrays;
     }

-    @Override
-    public FilterCache filterCache() {
-        return indexService.cache().filter();
-    }
-
     @Override
     public BitsetFilterCache bitsetFilterCache() {
         return indexService.bitsetFilterCache();
diff --git a/src/main/java/org/elasticsearch/percolator/PercolatorService.java b/src/main/java/org/elasticsearch/percolator/PercolatorService.java
index 1fa8aa85693..1284edc4902 100644
--- a/src/main/java/org/elasticsearch/percolator/PercolatorService.java
+++ b/src/main/java/org/elasticsearch/percolator/PercolatorService.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.percolator;

 import com.carrotsearch.hppc.ByteObjectOpenHashMap;
+import com.google.common.collect.Lists;

 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.ReaderUtil;
@@ -31,12 +32,12 @@ import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.FilteredQuery;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.Query;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CloseableThreadLocal;
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.percolate.PercolateResponse;
 import org.elasticsearch.action.percolate.PercolateShardRequest;
@@ -70,14 +71,12 @@ import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MapperService;
-import org.elasticsearch.index.mapper.MapperUtils;
 import org.elasticsearch.index.mapper.Mapping;
 import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.mapper.internal.UidFieldMapper;
 import org.elasticsearch.index.percolator.stats.ShardPercolateService;
 import org.elasticsearch.index.query.ParsedQuery;
-import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.percolator.QueryCollector.Count;
@@ -88,8 +87,11 @@ import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.search.SearchParseElement;
 import org.elasticsearch.search.SearchShardTarget;
 import org.elasticsearch.search.aggregations.AggregationPhase;
+import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
 import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
+import org.elasticsearch.search.aggregations.reducers.SiblingReducer;
 import org.elasticsearch.search.highlight.HighlightField;
 import org.elasticsearch.search.highlight.HighlightPhase;
 import org.elasticsearch.search.internal.SearchContext;
@@ -198,7 +200,7 @@ public class PercolatorService extends AbstractComponent {
         if (request.docSource() != null && request.docSource().length() != 0) {
             parsedDocument = parseFetchedDoc(context, request.docSource(), percolateIndexService, request.documentType());
         } else if (parsedDocument == null) {
-            throw new ElasticsearchIllegalArgumentException("Nothing to percolate");
+            throw new IllegalArgumentException("Nothing to percolate");
         }

         if (context.percolateQuery() == null && (context.trackScores() || context.doSort || context.aggregations() != null) || context.aliasFilter() != null) {
@@ -206,11 +208,11 @@ public class PercolatorService extends AbstractComponent {
         }

         if (context.doSort && !context.limit) {
-            throw new ElasticsearchIllegalArgumentException("Can't sort if size isn't specified");
+            throw new IllegalArgumentException("Can't sort if size isn't specified");
         }

         if (context.highlight() != null && !context.limit) {
-            throw new ElasticsearchIllegalArgumentException("Can't highlight if size isn't specified");
+            throw new IllegalArgumentException("Can't highlight if size isn't specified");
         }

         if (context.size() < 0) {
@@ -249,7 +251,7 @@ public class PercolatorService extends AbstractComponent {
         }
     }

-    private ParsedDocument parseRequest(IndexService documentIndexService, PercolateShardRequest request, PercolateContext context) throws ElasticsearchException {
+    private ParsedDocument parseRequest(IndexService documentIndexService, PercolateShardRequest request, PercolateContext context) {
         BytesReference source = request.source();
         if (source == null || source.length() == 0) {
             return null;
@@ -289,7 +291,7 @@ public class PercolatorService extends AbstractComponent {
                             doc.addDynamicMappingsUpdate(docMapper.v2());
                         }
                         if (doc.dynamicMappingsUpdate() != null) {
-                            mappingUpdatedAction.updateMappingOnMasterSynchronously(request.shardId().getIndex(), documentIndexService.indexUUID(), request.documentType(), doc.dynamicMappingsUpdate());
+                            mappingUpdatedAction.updateMappingOnMasterSynchronously(request.shardId().getIndex(), request.documentType(), doc.dynamicMappingsUpdate());
                         }
                         // the document parsing exists the "doc" object, so we need to set the new current field.
                         currentFieldName = parser.currentName();
@@ -455,7 +457,7 @@ public class PercolatorService extends AbstractComponent {
         for (Map.Entry entry : context.percolateQueries().entrySet()) {
             try {
                 if (isNested) {
-                    Lucene.exists(context.docSearcher(), entry.getValue(), NonNestedDocsFilter.INSTANCE, collector);
+                    Lucene.exists(context.docSearcher(), entry.getValue(), Queries.newNonNestedFilter(), collector);
                 } else {
                     Lucene.exists(context.docSearcher(), entry.getValue(), collector);
                 }
@@ -555,7 +557,7 @@ public class PercolatorService extends AbstractComponent {
             }
             try {
                 if (isNested) {
-                    Lucene.exists(context.docSearcher(), entry.getValue(), NonNestedDocsFilter.INSTANCE, collector);
+                    Lucene.exists(context.docSearcher(), entry.getValue(), Queries.newNonNestedFilter(), collector);
                 } else {
                     Lucene.exists(context.docSearcher(), entry.getValue(), collector);
                 }
@@ -793,14 +795,13 @@ public class PercolatorService extends AbstractComponent {

     private void queryBasedPercolating(Engine.Searcher percolatorSearcher, PercolateContext context, QueryCollector percolateCollector) throws IOException {
         Filter percolatorTypeFilter = context.indexService().mapperService().documentMapper(TYPE_NAME).typeFilter();
-        percolatorTypeFilter = context.indexService().cache().filter().cache(percolatorTypeFilter, null, context.queryParserService().autoFilterCachePolicy());

         final Filter filter;
         if (context.aliasFilter() != null) {
             BooleanQuery booleanFilter = new BooleanQuery();
             booleanFilter.add(context.aliasFilter(), BooleanClause.Occur.MUST);
             booleanFilter.add(percolatorTypeFilter, BooleanClause.Occur.MUST);
-            filter = Queries.wrap(booleanFilter);
+            filter = new QueryWrapperFilter(booleanFilter);
         } else {
             filter = percolatorTypeFilter;
         }
@@ -849,15 +850,24 @@ public class PercolatorService extends AbstractComponent {
             return null;
         }

-        if (shardResults.size() == 1) {
-            return shardResults.get(0).aggregations();
-        }
-
         List<InternalAggregations> aggregationsList = new ArrayList<>(shardResults.size());
         for (PercolateShardResponse shardResult : shardResults) {
             aggregationsList.add(shardResult.aggregations());
         }
-        return InternalAggregations.reduce(aggregationsList, new ReduceContext(bigArrays, scriptService));
+        InternalAggregations aggregations = InternalAggregations.reduce(aggregationsList, new ReduceContext(bigArrays, scriptService));
+        if (aggregations != null) {
+            List<SiblingReducer> reducers = shardResults.get(0).reducers();
+            if (reducers != null) {
+                List<InternalAggregation> newAggs = new ArrayList<>(Lists.transform(aggregations.asList(), Reducer.AGGREGATION_TRANFORM_FUNCTION));
+                for (SiblingReducer reducer : reducers) {
+                    InternalAggregation newAgg = reducer.doReduce(new InternalAggregations(newAggs), new ReduceContext(bigArrays,
+                            scriptService));
+                    newAggs.add(newAgg);
+                }
+                aggregations = new InternalAggregations(newAggs);
+            }
+        }
+        return aggregations;
     }
 }
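Note what the removed shardResults.size() == 1 shortcut implies: even a single shard response must now go through the reduce path, because sibling reducers run only during reduction. The new flow merges the shard aggregations first, then lets each SiblingReducer derive a new aggregation from everything reduced so far and appends it. Schematically, with types and calls taken from the diff and the surrounding setup elided:

    // 1. Merge per-shard aggregation trees.
    InternalAggregations merged = InternalAggregations.reduce(aggregationsList,
            new ReduceContext(bigArrays, scriptService));

    // 2. Let each sibling reducer see the merged tree (plus the output of
    //    earlier reducers) and append its derived aggregation.
    List<InternalAggregation> newAggs =
            new ArrayList<>(Lists.transform(merged.asList(), Reducer.AGGREGATION_TRANFORM_FUNCTION));
    for (SiblingReducer reducer : reducers) {
        newAggs.add(reducer.doReduce(new InternalAggregations(newAggs),
                new ReduceContext(bigArrays, scriptService)));
    }
    merged = new InternalAggregations(newAggs);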
diff --git a/src/main/java/org/elasticsearch/percolator/QueryCollector.java b/src/main/java/org/elasticsearch/percolator/QueryCollector.java
index f289e188167..e21e77612d6 100644
--- a/src/main/java/org/elasticsearch/percolator/QueryCollector.java
+++ b/src/main/java/org/elasticsearch/percolator/QueryCollector.java
@@ -19,6 +19,7 @@
 package org.elasticsearch.percolator;

 import com.carrotsearch.hppc.FloatArrayList;
+
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.LeafCollector;
@@ -30,13 +31,13 @@ import org.apache.lucene.search.TopScoreDocCollector;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.mapper.internal.UidFieldMapper;
 import org.elasticsearch.index.query.ParsedQuery;
-import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.BucketCollector;
 import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator;
@@ -180,7 +181,7 @@ abstract class QueryCollector extends SimpleCollector {
             }

             if (isNestedDoc) {
-                Lucene.exists(searcher, query, NonNestedDocsFilter.INSTANCE, collector);
+                Lucene.exists(searcher, query, Queries.newNonNestedFilter(), collector);
             } else {
                 Lucene.exists(searcher, query, collector);
             }
@@ -239,7 +240,7 @@ abstract class QueryCollector extends SimpleCollector {
             // run the query
             try {
                 if (isNestedDoc) {
-                    Lucene.exists(searcher, query, NonNestedDocsFilter.INSTANCE, collector);
+                    Lucene.exists(searcher, query, Queries.newNonNestedFilter(), collector);
                 } else {
                     Lucene.exists(searcher, query, collector);
                 }
@@ -311,7 +312,7 @@ abstract class QueryCollector extends SimpleCollector {
                 context.hitContext().cache().clear();
             }
             if (isNestedDoc) {
-                Lucene.exists(searcher, query, NonNestedDocsFilter.INSTANCE, collector);
+                Lucene.exists(searcher, query, Queries.newNonNestedFilter(), collector);
             } else {
                 Lucene.exists(searcher, query, collector);
             }
@@ -372,7 +373,7 @@ abstract class QueryCollector extends SimpleCollector {
             // run the query
             try {
                 if (isNestedDoc) {
-                    Lucene.exists(searcher, query, NonNestedDocsFilter.INSTANCE, collector);
+                    Lucene.exists(searcher, query, Queries.newNonNestedFilter(), collector);
                 } else {
                     Lucene.exists(searcher, query, collector);
                 }
diff --git a/src/main/java/org/elasticsearch/percolator/SingleDocumentPercolatorIndex.java b/src/main/java/org/elasticsearch/percolator/SingleDocumentPercolatorIndex.java
index 7df3d66e948..165193d35f0 100644
--- a/src/main/java/org/elasticsearch/percolator/SingleDocumentPercolatorIndex.java
+++ b/src/main/java/org/elasticsearch/percolator/SingleDocumentPercolatorIndex.java
@@ -77,7 +77,7 @@ class SingleDocumentPercolatorIndex implements PercolatorIndex {
         }

         @Override
-        public void close() throws ElasticsearchException {
+        public void close() {
             try {
                 this.reader().close();
                 memoryIndex.reset();
diff --git a/src/main/java/org/elasticsearch/plugins/PluginManager.java b/src/main/java/org/elasticsearch/plugins/PluginManager.java
index fbd89b9f27a..5678c03fdb9 100644
--- a/src/main/java/org/elasticsearch/plugins/PluginManager.java
+++ b/src/main/java/org/elasticsearch/plugins/PluginManager.java
@@ -119,7 +119,7 @@ public class PluginManager {

     public void downloadAndExtract(String name) throws IOException {
         if (name == null) {
-            throw new ElasticsearchIllegalArgumentException("plugin name must be supplied with --install [name].");
+            throw new IllegalArgumentException("plugin name must be supplied with --install [name].");
         }
         HttpDownloadHelper downloadHelper = new HttpDownloadHelper();
         boolean downloaded = false;
@@ -293,7 +293,7 @@ public class PluginManager {

     public void removePlugin(String name) throws IOException {
         if (name == null) {
-            throw new ElasticsearchIllegalArgumentException("plugin name must be supplied with --remove [name].");
+            throw new IllegalArgumentException("plugin name must be supplied with --remove [name].");
         }
         PluginHandle pluginHandle = PluginHandle.parse(name);
         boolean removed = false;
@@ -342,7 +342,7 @@ public class PluginManager {

     private static void checkForForbiddenName(String name) {
         if (!hasLength(name) || BLACKLIST.contains(name.toLowerCase(Locale.ROOT))) {
-            throw new ElasticsearchIllegalArgumentException("Illegal plugin name: " + name);
+            throw new IllegalArgumentException("Illegal plugin name: " + name);
         }
     }

@@ -485,7 +485,7 @@ public class PluginManager {
                         pluginManager.log("-> Removing " + Strings.nullToEmpty(pluginName) + "...");
                         pluginManager.removePlugin(pluginName);
                         exitCode = EXIT_CODE_OK;
-                    } catch (ElasticsearchIllegalArgumentException e) {
+                    } catch (IllegalArgumentException e) {
                         exitCode = EXIT_CODE_CMD_USAGE;
                         pluginManager.log("Failed to remove " + pluginName + ", reason: " + e.getMessage());
                     } catch (IOException e) {
@@ -526,12 +526,12 @@ public class PluginManager {
      * @return Never {@code null}. The trimmed value.
      * @throws NullPointerException if {@code args} is {@code null}.
      * @throws ArrayIndexOutOfBoundsException if {@code arg} is negative.
-     * @throws ElasticsearchIllegalStateException if {@code arg} is >= {@code args.length}.
-     * @throws ElasticsearchIllegalArgumentException if the value evaluates to blank ({@code null} or only whitespace)
+     * @throws IllegalStateException if {@code arg} is >= {@code args.length}.
+     * @throws IllegalArgumentException if the value evaluates to blank ({@code null} or only whitespace)
      */
     private static String getCommandValue(String[] args, int arg, String flag) {
         if (arg >= args.length) {
-            throw new ElasticsearchIllegalStateException("missing value for " + flag + ". Usage: " + flag + " [value]");
+            throw new IllegalStateException("missing value for " + flag + ". Usage: " + flag + " [value]");
         }

         // avoid having to interpret multiple forms of unset
@@ -539,7 +539,7 @@ public class PluginManager {

         // If we had a value that is blank, then fail immediately
         if (trimmedValue == null) {
-            throw new ElasticsearchIllegalArgumentException(
+            throw new IllegalArgumentException(
                     "value for " + flag + "('" + args[arg] + "') must be set. Usage: " + flag + " [value]");
         }
Usage: " + flag + " [value]"); } diff --git a/src/main/java/org/elasticsearch/plugins/PluginsService.java b/src/main/java/org/elasticsearch/plugins/PluginsService.java index 7a7c569acbe..3d4b9f86e8a 100644 --- a/src/main/java/org/elasticsearch/plugins/PluginsService.java +++ b/src/main/java/org/elasticsearch/plugins/PluginsService.java @@ -25,7 +25,6 @@ import com.google.common.collect.*; import org.apache.lucene.util.Constants; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.info.PluginInfo; import org.elasticsearch.action.admin.cluster.node.info.PluginsInfo; @@ -123,7 +122,7 @@ public class PluginsService extends AbstractComponent { try { loadPluginsIntoClassLoader(); } catch (IOException ex) { - throw new ElasticsearchIllegalStateException("Can't load plugins into classloader", ex); + throw new IllegalStateException("Can't load plugins into classloader", ex); } if (loadClasspathPlugins) { tupleBuilder.addAll(loadPluginsFromClasspath(settings)); @@ -147,7 +146,7 @@ public class PluginsService extends AbstractComponent { sitePlugins.add(tuple.v1().getName()); } } catch (IOException ex) { - throw new ElasticsearchIllegalStateException("Can't load site plugins", ex); + throw new IllegalStateException("Can't load site plugins", ex); } // Checking expected plugins diff --git a/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index 36fbf05c2ec..e88ff922d72 100644 --- a/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -21,8 +21,7 @@ package org.elasticsearch.repositories; import com.google.common.base.Joiner; import com.google.common.collect.ImmutableMap; -import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchIllegalStateException; + import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest; @@ -34,7 +33,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Injector; -import org.elasticsearch.common.inject.Injectors; import org.elasticsearch.common.inject.ModulesBuilder; import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.Settings; @@ -413,7 +411,7 @@ public class RepositoriesService extends AbstractComponent implements ClusterSta private void ensureRepositoryNotInUse(ClusterState clusterState, String repository) { if (SnapshotsService.isRepositoryInUse(clusterState, repository) || RestoreService.isRepositoryInUse(clusterState, repository)) { - throw new ElasticsearchIllegalStateException("trying to modify or unregister repository that is currently used "); + throw new IllegalStateException("trying to modify or unregister repository that is currently used "); } } diff --git a/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java b/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java index 3150c8e6b29..11fb1cb3225 100644 --- a/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java +++ b/src/main/java/org/elasticsearch/repositories/VerifyNodeRepositoryAction.java 
@@ -22,7 +22,6 @@ package org.elasticsearch.repositories; import com.carrotsearch.hppc.ObjectContainer; import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -30,7 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.io.stream.Streamable; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.snapshots.IndexShardRepository; import org.elasticsearch.threadpool.ThreadPool; @@ -58,7 +56,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent { this.transportService = transportService; this.clusterService = clusterService; this.repositoriesService = repositoriesService; - transportService.registerHandler(ACTION_NAME, new VerifyNodeRepositoryRequestHandler()); + transportService.registerRequestHandler(ACTION_NAME, VerifyNodeRepositoryRequest.class, ThreadPool.Names.SAME, new VerifyNodeRepositoryRequestHandler()); } public void close() { @@ -117,16 +115,15 @@ public class VerifyNodeRepositoryAction extends AbstractComponent { blobStoreIndexShardRepository.verify(verificationToken); } - private class VerifyNodeRepositoryRequest extends TransportRequest { + static class VerifyNodeRepositoryRequest extends TransportRequest { private String repository; - private String verificationToken; - private VerifyNodeRepositoryRequest() { + VerifyNodeRepositoryRequest() { } - private VerifyNodeRepositoryRequest(String repository, String verificationToken) { + VerifyNodeRepositoryRequest(String repository, String verificationToken) { this.repository = repository; this.verificationToken = verificationToken; } @@ -146,18 +143,7 @@ public class VerifyNodeRepositoryAction extends AbstractComponent { } } - private class VerifyNodeRepositoryRequestHandler extends BaseTransportRequestHandler { - - @Override - public VerifyNodeRepositoryRequest newInstance() { - return new VerifyNodeRepositoryRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - + class VerifyNodeRepositoryRequestHandler implements TransportRequestHandler { @Override public void messageReceived(VerifyNodeRepositoryRequest request, TransportChannel channel) throws Exception { doVerify(request.repository, request.verificationToken); diff --git a/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index d8e3569c3d7..180f6595521 100644 --- a/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -26,7 +26,6 @@ import com.google.common.collect.Maps; import com.google.common.io.ByteStreams; import org.apache.lucene.store.RateLimiter; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -159,7 +158,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent { } @Override - 
protected void doStart() throws ElasticsearchException { + protected void doStart() { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { for (RestFilter filter : filters) { filter.close(); } @@ -136,7 +134,7 @@ public class RestController extends AbstractLifecycleComponent { headHandlers.insert(path, handler); break; default: - throw new ElasticsearchIllegalArgumentException("Can't handle [" + method + "] for path [" + path + "]"); + throw new IllegalArgumentException("Can't handle [" + method + "] for path [" + path + "]"); } } @@ -260,7 +258,7 @@ public class RestController extends AbstractLifecycleComponent { try { int loc = index.getAndIncrement(); if (loc > filters.length) { - throw new ElasticsearchIllegalStateException("filter continueProcessing was called more than expected"); + throw new IllegalStateException("filter continueProcessing was called more than expected"); } else if (loc == filters.length) { executionFilter.process(request, channel, this); } else { diff --git a/src/main/java/org/elasticsearch/rest/RestFilter.java b/src/main/java/org/elasticsearch/rest/RestFilter.java index 398f157d7b5..dd86c026b75 100644 --- a/src/main/java/org/elasticsearch/rest/RestFilter.java +++ b/src/main/java/org/elasticsearch/rest/RestFilter.java @@ -37,7 +37,7 @@ public abstract class RestFilter implements Closeable { } @Override - public void close() throws ElasticsearchException { + public void close() { // a no op } diff --git a/src/main/java/org/elasticsearch/rest/RestRequest.java b/src/main/java/org/elasticsearch/rest/RestRequest.java index a908e5c55a7..6f2fd926ab9 100644 --- a/src/main/java/org/elasticsearch/rest/RestRequest.java +++ b/src/main/java/org/elasticsearch/rest/RestRequest.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.ContextHolder; import org.elasticsearch.common.Nullable; @@ -97,7 +96,7 @@ public abstract class RestRequest extends ContextHolder implements ToXContent.Pa try { return Float.parseFloat(sValue); } catch (NumberFormatException e) { - throw new ElasticsearchIllegalArgumentException("Failed to parse float parameter [" + key + "] with value [" + sValue + "]", e); + throw new IllegalArgumentException("Failed to parse float parameter [" + key + "] with value [" + sValue + "]", e); } } @@ -109,7 +108,7 @@ public abstract class RestRequest extends ContextHolder implements ToXContent.Pa try { return Integer.parseInt(sValue); } catch (NumberFormatException e) { - throw new ElasticsearchIllegalArgumentException("Failed to parse int parameter [" + key + "] with value [" + sValue + "]", e); + throw new IllegalArgumentException("Failed to parse int parameter [" + key + "] with value [" + sValue + "]", e); } } @@ -121,7 +120,7 @@ public abstract class RestRequest extends ContextHolder implements ToXContent.Pa try { return Long.parseLong(sValue); } catch (NumberFormatException e) { - throw new ElasticsearchIllegalArgumentException("Failed to parse int parameter [" + key + "] with value [" + sValue + "]", e); + throw new IllegalArgumentException("Failed to parse int parameter [" + key + "] with value [" + sValue + "]", e); } } diff --git a/src/main/java/org/elasticsearch/rest/action/RestActionModule.java b/src/main/java/org/elasticsearch/rest/action/RestActionModule.java index 
9bc0d989058..424d6d0d954 100644 --- a/src/main/java/org/elasticsearch/rest/action/RestActionModule.java +++ b/src/main/java/org/elasticsearch/rest/action/RestActionModule.java @@ -28,7 +28,6 @@ import org.elasticsearch.rest.action.admin.cluster.repositories.verify.RestVerif import org.elasticsearch.rest.action.admin.cluster.health.RestClusterHealthAction; import org.elasticsearch.rest.action.admin.cluster.node.hotthreads.RestNodesHotThreadsAction; import org.elasticsearch.rest.action.admin.cluster.node.info.RestNodesInfoAction; -import org.elasticsearch.rest.action.admin.cluster.node.shutdown.RestNodesShutdownAction; import org.elasticsearch.rest.action.admin.cluster.node.stats.RestNodesStatsAction; import org.elasticsearch.rest.action.admin.cluster.repositories.delete.RestDeleteRepositoryAction; import org.elasticsearch.rest.action.admin.cluster.repositories.get.RestGetRepositoriesAction; @@ -82,8 +81,8 @@ import org.elasticsearch.rest.action.admin.indices.recovery.RestRecoveryAction; import org.elasticsearch.rest.action.bulk.RestBulkAction; import org.elasticsearch.rest.action.cat.*; import org.elasticsearch.rest.action.delete.RestDeleteAction; -import org.elasticsearch.rest.action.deletebyquery.RestDeleteByQueryAction; import org.elasticsearch.rest.action.explain.RestExplainAction; +import org.elasticsearch.rest.action.fieldstats.RestFieldStatsAction; import org.elasticsearch.rest.action.get.RestGetAction; import org.elasticsearch.rest.action.get.RestGetSourceAction; import org.elasticsearch.rest.action.get.RestHeadAction; @@ -131,7 +130,6 @@ public class RestActionModule extends AbstractModule { bind(RestNodesInfoAction.class).asEagerSingleton(); bind(RestNodesStatsAction.class).asEagerSingleton(); bind(RestNodesHotThreadsAction.class).asEagerSingleton(); - bind(RestNodesShutdownAction.class).asEagerSingleton(); bind(RestClusterStatsAction.class).asEagerSingleton(); bind(RestClusterStateAction.class).asEagerSingleton(); bind(RestClusterHealthAction.class).asEagerSingleton(); @@ -195,7 +193,6 @@ public class RestActionModule extends AbstractModule { bind(RestHeadAction.class).asEagerSingleton(); bind(RestMultiGetAction.class).asEagerSingleton(); bind(RestDeleteAction.class).asEagerSingleton(); - bind(RestDeleteByQueryAction.class).asEagerSingleton(); bind(org.elasticsearch.rest.action.count.RestCountAction.class).asEagerSingleton(); bind(RestSuggestAction.class).asEagerSingleton(); bind(RestTermVectorsAction.class).asEagerSingleton(); @@ -229,6 +226,8 @@ public class RestActionModule extends AbstractModule { bind(RestDeleteIndexedScriptAction.class).asEagerSingleton(); + bind(RestFieldStatsAction.class).asEagerSingleton(); + // cat API Multibinder catActionMultibinder = Multibinder.newSetBinder(binder(), AbstractCatAction.class); catActionMultibinder.addBinding().to(RestAllocationAction.class).asEagerSingleton(); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/shutdown/RestNodesShutdownAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/shutdown/RestNodesShutdownAction.java deleted file mode 100644 index 950ce9083af..00000000000 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/node/shutdown/RestNodesShutdownAction.java +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.rest.action.admin.cluster.node.shutdown; - -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownRequest; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.rest.*; -import org.elasticsearch.rest.action.support.RestBuilderListener; - -/** - * - */ -public class RestNodesShutdownAction extends BaseRestHandler { - - @Inject - public RestNodesShutdownAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); - - controller.registerHandler(RestRequest.Method.POST, "/_shutdown", this); - controller.registerHandler(RestRequest.Method.POST, "/_cluster/nodes/_shutdown", this); - controller.registerHandler(RestRequest.Method.POST, "/_cluster/nodes/{nodeId}/_shutdown", this); - } - - @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { - String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId")); - NodesShutdownRequest nodesShutdownRequest = new NodesShutdownRequest(nodesIds); - nodesShutdownRequest.listenerThreaded(false); - nodesShutdownRequest.delay(request.paramAsTime("delay", nodesShutdownRequest.delay())); - nodesShutdownRequest.exit(request.paramAsBoolean("exit", nodesShutdownRequest.exit())); - client.admin().cluster().nodesShutdown(nodesShutdownRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(NodesShutdownResponse response, XContentBuilder builder) throws Exception { - builder.startObject(); - builder.field("cluster_name", response.getClusterName().value()); - - builder.startObject("nodes"); - for (DiscoveryNode node : response.getNodes()) { - builder.startObject(node.id(), XContentBuilder.FieldCaseConversion.NONE); - builder.field("name", node.name(), XContentBuilder.FieldCaseConversion.NONE); - builder.endObject(); - } - builder.endObject(); - - builder.endObject(); - return new BytesRestResponse(RestStatus.OK, builder); - } - }); - } -} diff --git a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java index be4e1b4e3f3..85b46925b5f 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/cluster/repositories/get/RestGetRepositoriesAction.java @@ -58,7 +58,7 @@ public class RestGetRepositoriesAction extends BaseRestHandler { public RestResponse 
buildResponse(GetRepositoriesResponse response, XContentBuilder builder) throws Exception { builder.startObject(); for (RepositoryMetaData repositoryMetaData : response.repositories()) { - RepositoriesMetaData.FACTORY.toXContent(repositoryMetaData, builder, request); + RepositoriesMetaData.toXContent(repositoryMetaData, builder, request); } builder.endObject(); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java index 423d1c0f73b..2019b71426a 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/RestIndicesAliasesAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.admin.indices.alias; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.client.Client; @@ -62,7 +61,7 @@ public class RestIndicesAliasesAction extends BaseRestHandler { indicesAliasesRequest.timeout(request.paramAsTime("timeout", indicesAliasesRequest.timeout())); XContentParser.Token token = parser.nextToken(); if (token == null) { - throw new ElasticsearchIllegalArgumentException("No action is specified"); + throw new IllegalArgumentException("No action is specified"); } while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.START_ARRAY) { @@ -75,7 +74,7 @@ public class RestIndicesAliasesAction extends BaseRestHandler { } else if ("remove".equals(action)) { type = AliasAction.Type.REMOVE; } else { - throw new ElasticsearchIllegalArgumentException("Alias action [" + action + "] not supported"); + throw new IllegalArgumentException("Alias action [" + action + "] not supported"); } String index = null; String alias = null; diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java index 227deb11918..4965f6b218d 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/alias/put/RestIndexPutAliasAction.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.rest.action.admin.indices.alias.put; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; @@ -75,7 +74,7 @@ public class RestIndexPutAliasAction extends BaseRestHandler { try (XContentParser parser = XContentFactory.xContent(request.content()).createParser(request.content())) { XContentParser.Token token = parser.nextToken(); if (token == null) { - throw new ElasticsearchIllegalArgumentException("No index alias is specified"); + throw new IllegalArgumentException("No index alias is specified"); } String currentFieldName = null; while ((token = parser.nextToken()) != null) { diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java 
b/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java index 0495d3420cb..4a5e47b9664 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/analyze/RestAnalyzeAction.java @@ -19,18 +19,17 @@ package org.elasticsearch.rest.action.admin.indices.analyze; import com.google.common.collect.Lists; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse; import org.elasticsearch.client.Client; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.rest.*; +import org.elasticsearch.rest.action.support.RestActions; import org.elasticsearch.rest.action.support.RestToXContentListener; import java.io.IOException; @@ -55,7 +54,9 @@ public class RestAnalyzeAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + String text = request.param("text"); + AnalyzeRequest analyzeRequest = new AnalyzeRequest(request.param("index")); analyzeRequest.text(text); analyzeRequest.listenerThreaded(false); @@ -66,26 +67,26 @@ public class RestAnalyzeAction extends BaseRestHandler { analyzeRequest.tokenFilters(request.paramAsStringArray("token_filters", request.paramAsStringArray("filters", analyzeRequest.tokenFilters()))); analyzeRequest.charFilters(request.paramAsStringArray("char_filters", analyzeRequest.charFilters())); - if (request.hasContent()) { - XContentType type = XContentFactory.xContentType(request.content()); + if (RestActions.hasBodyContent(request)) { + XContentType type = RestActions.guessBodyContentType(request); if (type == null) { if (text == null) { - text = request.content().toUtf8(); + text = RestActions.getRestContent(request).toUtf8(); analyzeRequest.text(text); } } else { // NOTE: if rest request with xcontent body has request parameters, the parameters do not override xcontent values - buildFromContent(request.content(), analyzeRequest); + buildFromContent(RestActions.getRestContent(request), analyzeRequest); } } client.admin().indices().analyze(analyzeRequest, new RestToXContentListener<AnalyzeResponse>(channel)); } - public static void buildFromContent(BytesReference content, AnalyzeRequest analyzeRequest) throws ElasticsearchIllegalArgumentException { + public static void buildFromContent(BytesReference content, AnalyzeRequest analyzeRequest) { try (XContentParser parser = XContentHelper.createParser(content)) { if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("Malformed content, must start with an object"); + throw new IllegalArgumentException("Malformed content, must start with an object"); } else { XContentParser.Token token; String currentFieldName = null; @@ -106,7 +107,7 @@ public class RestAnalyzeAction extends BaseRestHandler { List<String> filters = Lists.newArrayList(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token.isValue() == false) { - throw new
ElasticsearchIllegalArgumentException(currentFieldName + " array element should only contain token filter's name"); + throw new IllegalArgumentException(currentFieldName + " array element should only contain token filter's name"); } filters.add(parser.text()); } @@ -115,23 +116,18 @@ List<String> charFilters = Lists.newArrayList(); while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token.isValue() == false) { - throw new ElasticsearchIllegalArgumentException(currentFieldName + " array element should only contain char filter's name"); + throw new IllegalArgumentException(currentFieldName + " array element should only contain char filter's name"); } charFilters.add(parser.text()); } analyzeRequest.charFilters(charFilters.toArray(new String[0])); } else { - throw new ElasticsearchIllegalArgumentException("Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] "); + throw new IllegalArgumentException("Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] "); } } } } catch (IOException e) { - throw new IllegalArgumentException("Failed to parse request body", e); } } - - - - - } diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java index 8bd49e39d63..54d9948537e 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/cache/clear/RestClearIndicesCacheAction.java @@ -89,9 +89,6 @@ public class RestClearIndicesCacheAction extends BaseRestHandler { if (Fields.FIELDS.match(entry.getKey())) { clearIndicesCacheRequest.fields(request.paramAsStringArray(entry.getKey(), clearIndicesCacheRequest.fields())); } - if (Fields.FILTER_KEYS.match(entry.getKey())) { - clearIndicesCacheRequest.filterKeys(request.paramAsStringArray(entry.getKey(), clearIndicesCacheRequest.filterKeys())); - } } return clearIndicesCacheRequest; @@ -103,7 +100,6 @@ public class RestClearIndicesCacheAction extends BaseRestHandler { public static final ParseField ID = new ParseField("id", "id_cache"); public static final ParseField RECYCLER = new ParseField("recycler"); public static final ParseField FIELDS = new ParseField("fields"); - public static final ParseField FILTER_KEYS = new ParseField("filter_keys"); } } diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java index dc800f37062..dd1dca34bbc 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/get/RestGetIndicesAction.java @@ -21,7 +21,6 @@ package org.elasticsearch.rest.action.admin.indices.get; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableList; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.action.admin.indices.get.GetIndexRequest; import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; import org.elasticsearch.action.admin.indices.get.GetIndexResponse; @@ -105,7 +104,7
@@ public class RestGetIndicesAction extends BaseRestHandler { writeWarmers(response.warmers().get(index), builder, request); break; default: - throw new ElasticsearchIllegalStateException("feature [" + feature + "] is not valid"); + throw new IllegalStateException("feature [" + feature + "] is not valid"); } } builder.endObject(); @@ -147,7 +146,7 @@ public class RestGetIndicesAction extends BaseRestHandler { builder.startObject(Fields.WARMERS); if (warmers != null) { for (IndexWarmersMetaData.Entry warmer : warmers) { - IndexWarmersMetaData.FACTORY.toXContent(warmer, builder, params); + IndexWarmersMetaData.toXContent(warmer, builder, params); } } builder.endObject(); @@ -162,4 +161,4 @@ public class RestGetIndicesAction extends BaseRestHandler { static final XContentBuilderString WARMERS = new XContentBuilderString("warmers"); } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java index 6d960dc3fe9..32a2d24e888 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/validate/query/RestValidateQueryAction.java @@ -59,17 +59,12 @@ public class RestValidateQueryAction extends BaseRestHandler { ValidateQueryRequest validateQueryRequest = new ValidateQueryRequest(Strings.splitStringByCommaToArray(request.param("index"))); validateQueryRequest.listenerThreaded(false); validateQueryRequest.indicesOptions(IndicesOptions.fromRequest(request, validateQueryRequest.indicesOptions())); - if (request.hasContent()) { - validateQueryRequest.source(request.content()); + if (RestActions.hasBodyContent(request)) { + validateQueryRequest.source(RestActions.getRestContent(request)); } else { - String source = request.param("source"); - if (source != null) { - validateQueryRequest.source(source); - } else { - QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request); - if (querySourceBuilder != null) { - validateQueryRequest.source(querySourceBuilder); - } + QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request); + if (querySourceBuilder != null) { + validateQueryRequest.source(querySourceBuilder); } } validateQueryRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); diff --git a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java index 7023eecedd4..be83ccbe4b5 100644 --- a/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java +++ b/src/main/java/org/elasticsearch/rest/action/admin/indices/warmer/get/RestGetWarmerAction.java @@ -72,7 +72,7 @@ public class RestGetWarmerAction extends BaseRestHandler { builder.startObject(entry.key, XContentBuilder.FieldCaseConversion.NONE); builder.startObject(IndexWarmersMetaData.TYPE, XContentBuilder.FieldCaseConversion.NONE); for (IndexWarmersMetaData.Entry warmerEntry : entry.value) { - IndexWarmersMetaData.FACTORY.toXContent(warmerEntry, builder, request); + IndexWarmersMetaData.toXContent(warmerEntry, builder, request); } builder.endObject(); builder.endObject(); diff --git a/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java b/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java 
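
RestCountAction below repeats a refactoring applied throughout this patch (analyze, validate-query, count, exists, explain, search, suggest, and the scroll actions): each handler's hand-rolled request.param("source") fallback disappears in favor of the shared RestActions.hasBodyContent(request) and RestActions.getRestContent(request) helpers. Only the helper names and call sites come from the patch; the bodies below are a hedged guess at what they centralize, namely treating the legacy ?source= parameter as an alternate request body:

    // Hypothetical sketch of the shared helpers (names from the patch,
    // implementations assumed): a request "has body content" if it carries
    // an actual body or the legacy ?source= query parameter.
    public static boolean hasBodyContent(final RestRequest request) {
        return request.hasContent() || request.hasParam("source");
    }

    public static BytesReference getRestContent(final RestRequest request) {
        if (request.hasContent()) {
            return request.content();
        }
        String source = request.param("source"); // legacy ?source= fallback
        return source != null ? new BytesArray(source) : null;
    }
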
index 82e11f75bea..58dcfa50973 100644 --- a/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java +++ b/src/main/java/org/elasticsearch/rest/action/count/RestCountAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.count; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.count.CountRequest; import org.elasticsearch.action.count.CountResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -60,17 +59,12 @@ public class RestCountAction extends BaseRestHandler { CountRequest countRequest = new CountRequest(Strings.splitStringByCommaToArray(request.param("index"))); countRequest.indicesOptions(IndicesOptions.fromRequest(request, countRequest.indicesOptions())); countRequest.listenerThreaded(false); - if (request.hasContent()) { - countRequest.source(request.content()); + if (RestActions.hasBodyContent(request)) { + countRequest.source(RestActions.getRestContent(request)); } else { - String source = request.param("source"); - if (source != null) { - countRequest.source(source); - } else { - QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request); - if (querySourceBuilder != null) { - countRequest.source(querySourceBuilder); - } + QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request); + if (querySourceBuilder != null) { + countRequest.source(querySourceBuilder); } } countRequest.routing(request.param("routing")); @@ -80,7 +74,7 @@ public class RestCountAction extends BaseRestHandler { final int terminateAfter = request.paramAsInt("terminate_after", DEFAULT_TERMINATE_AFTER); if (terminateAfter < 0) { - throw new ElasticsearchIllegalArgumentException("terminateAfter must be > 0"); + throw new IllegalArgumentException("terminateAfter must be > 0"); } else if (terminateAfter > 0) { countRequest.terminateAfter(terminateAfter); } diff --git a/src/main/java/org/elasticsearch/rest/action/deletebyquery/RestDeleteByQueryAction.java b/src/main/java/org/elasticsearch/rest/action/deletebyquery/RestDeleteByQueryAction.java deleted file mode 100644 index ee9935d103a..00000000000 --- a/src/main/java/org/elasticsearch/rest/action/deletebyquery/RestDeleteByQueryAction.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.rest.action.deletebyquery; - -import org.elasticsearch.action.WriteConsistencyLevel; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; -import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; -import org.elasticsearch.action.deletebyquery.IndexDeleteByQueryResponse; -import org.elasticsearch.action.deletebyquery.ShardDeleteByQueryRequest; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.action.support.QuerySourceBuilder; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.common.xcontent.XContentBuilderString; -import org.elasticsearch.rest.*; -import org.elasticsearch.rest.action.support.RestActions; -import org.elasticsearch.rest.action.support.RestBuilderListener; - -import static org.elasticsearch.rest.RestRequest.Method.DELETE; - -/** - * - */ -public class RestDeleteByQueryAction extends BaseRestHandler { - - @Inject - public RestDeleteByQueryAction(Settings settings, RestController controller, Client client) { - super(settings, controller, client); - controller.registerHandler(DELETE, "/{index}/_query", this); - controller.registerHandler(DELETE, "/{index}/{type}/_query", this); - } - - @Override - public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { - DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(Strings.splitStringByCommaToArray(request.param("index"))); - deleteByQueryRequest.listenerThreaded(false); - if (request.hasContent()) { - deleteByQueryRequest.source(request.content()); - } else { - String source = request.param("source"); - if (source != null) { - deleteByQueryRequest.source(source); - } else { - QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request); - if (querySourceBuilder != null) { - deleteByQueryRequest.source(querySourceBuilder); - } - } - } - deleteByQueryRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); - deleteByQueryRequest.timeout(request.paramAsTime("timeout", ShardDeleteByQueryRequest.DEFAULT_TIMEOUT)); - - deleteByQueryRequest.routing(request.param("routing")); - String consistencyLevel = request.param("consistency"); - if (consistencyLevel != null) { - deleteByQueryRequest.consistencyLevel(WriteConsistencyLevel.fromString(consistencyLevel)); - } - deleteByQueryRequest.indicesOptions(IndicesOptions.fromRequest(request, deleteByQueryRequest.indicesOptions())); - client.deleteByQuery(deleteByQueryRequest, new RestBuilderListener(channel) { - @Override - public RestResponse buildResponse(DeleteByQueryResponse result, XContentBuilder builder) throws Exception { - RestStatus restStatus = result.status(); - builder.startObject(); - builder.startObject(Fields._INDICES); - for (IndexDeleteByQueryResponse indexDeleteByQueryResponse : result.getIndices().values()) { - builder.startObject(indexDeleteByQueryResponse.getIndex(), XContentBuilder.FieldCaseConversion.NONE); - indexDeleteByQueryResponse.getShardInfo().toXContent(builder, request); - builder.endObject(); - builder.endObject(); - } - builder.endObject(); - return new BytesRestResponse(restStatus, builder); - } - }); - } - - static final class Fields { - static final XContentBuilderString _INDICES = new XContentBuilderString("_indices"); - static final XContentBuilderString _SHARDS 
= new XContentBuilderString("_shards"); - static final XContentBuilderString TOTAL = new XContentBuilderString("total"); - static final XContentBuilderString SUCCESSFUL = new XContentBuilderString("successful"); - static final XContentBuilderString FAILED = new XContentBuilderString("failed"); - static final XContentBuilderString FAILURES = new XContentBuilderString("failures"); - static final XContentBuilderString INDEX = new XContentBuilderString("index"); - static final XContentBuilderString SHARD = new XContentBuilderString("shard"); - static final XContentBuilderString REASON = new XContentBuilderString("reason"); - } -} diff --git a/src/main/java/org/elasticsearch/rest/action/exists/RestExistsAction.java b/src/main/java/org/elasticsearch/rest/action/exists/RestExistsAction.java index a10143c87b1..85f73f61ec4 100644 --- a/src/main/java/org/elasticsearch/rest/action/exists/RestExistsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/exists/RestExistsAction.java @@ -49,17 +49,12 @@ public class RestExistsAction extends BaseRestHandler { final ExistsRequest existsRequest = new ExistsRequest(Strings.splitStringByCommaToArray(request.param("index"))); existsRequest.indicesOptions(IndicesOptions.fromRequest(request, existsRequest.indicesOptions())); existsRequest.listenerThreaded(false); - if (request.hasContent()) { - existsRequest.source(request.content()); + if (RestActions.hasBodyContent(request)) { + existsRequest.source(RestActions.getRestContent(request)); } else { - String source = request.param("source"); - if (source != null) { - existsRequest.source(source); - } else { - QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request); - if (querySourceBuilder != null) { - existsRequest.source(querySourceBuilder); - } + QuerySourceBuilder querySourceBuilder = RestActions.parseQuerySource(request); + if (querySourceBuilder != null) { + existsRequest.source(querySourceBuilder); } } existsRequest.routing(request.param("routing")); diff --git a/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java b/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java index 708d1a4ad44..ce306c6563d 100644 --- a/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java +++ b/src/main/java/org/elasticsearch/rest/action/explain/RestExplainAction.java @@ -20,13 +20,11 @@ package org.elasticsearch.rest.action.explain; import org.apache.lucene.search.Explanation; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.explain.ExplainRequest; import org.elasticsearch.action.explain.ExplainResponse; import org.elasticsearch.action.support.QuerySourceBuilder; import org.elasticsearch.client.Client; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -35,6 +33,7 @@ import org.elasticsearch.index.get.GetResult; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.rest.*; +import org.elasticsearch.rest.action.support.RestActions; import org.elasticsearch.rest.action.support.RestBuilderListener; import org.elasticsearch.search.fetch.source.FetchSourceContext; @@ -63,12 +62,9 @@ public class RestExplainAction extends BaseRestHandler { explainRequest.parent(request.param("parent")); 
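
RestDeleteByQueryAction is deleted above, and this patch ships no in-core replacement. Where the old DELETE /{index}/_query behavior is still needed, the usual substitute is to scroll over the matching documents and remove them in bulk. A hedged sketch against the Java client API of this era; index name, query, and page size are placeholders:

    import org.elasticsearch.action.bulk.BulkRequestBuilder;
    import org.elasticsearch.action.search.SearchResponse;
    import org.elasticsearch.client.Client;
    import org.elasticsearch.common.unit.TimeValue;
    import org.elasticsearch.index.query.QueryBuilders;
    import org.elasticsearch.search.SearchHit;

    // Emulate delete-by-query: scroll over matches, delete each page in bulk.
    void deleteByQuery(Client client) {
        SearchResponse page = client.prepareSearch("my-index")
                .setQuery(QueryBuilders.termQuery("user", "kimchy"))
                .setScroll(TimeValue.timeValueMinutes(1))
                .setSize(500)
                .execute().actionGet();
        while (page.getHits().getHits().length > 0) {
            BulkRequestBuilder bulk = client.prepareBulk();
            for (SearchHit hit : page.getHits().getHits()) {
                bulk.add(client.prepareDelete(hit.getIndex(), hit.getType(), hit.getId()));
            }
            bulk.execute().actionGet();
            page = client.prepareSearchScroll(page.getScrollId())
                    .setScroll(TimeValue.timeValueMinutes(1))
                    .execute().actionGet();
        }
    }
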
explainRequest.routing(request.param("routing")); explainRequest.preference(request.param("preference")); - String sourceString = request.param("source"); String queryString = request.param("q"); - if (request.hasContent()) { - explainRequest.source(request.content()); - } else if (sourceString != null) { - explainRequest.source(new BytesArray(request.param("source"))); + if (RestActions.hasBodyContent(request)) { + explainRequest.source(RestActions.getRestContent(request)); } else if (queryString != null) { QueryStringQueryBuilder queryStringBuilder = QueryBuilders.queryStringQuery(queryString); queryStringBuilder.defaultField(request.param("df")); @@ -83,7 +79,7 @@ public class RestExplainAction extends BaseRestHandler { } else if ("AND".equals(defaultOperator)) { queryStringBuilder.defaultOperator(QueryStringQueryBuilder.Operator.AND); } else { - throw new ElasticsearchIllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]"); + throw new IllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]"); } } diff --git a/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java b/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java new file mode 100644 index 00000000000..fd45c5a56d4 --- /dev/null +++ b/src/main/java/org/elasticsearch/rest/action/fieldstats/RestFieldStatsAction.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.rest.action.fieldstats; + +import org.elasticsearch.action.fieldstats.FieldStats; +import org.elasticsearch.action.fieldstats.FieldStatsRequest; +import org.elasticsearch.action.fieldstats.FieldStatsResponse; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.Client; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.*; +import org.elasticsearch.rest.action.support.RestBuilderListener; + +import java.util.Map; + +import static org.elasticsearch.rest.RestRequest.Method.GET; +import static org.elasticsearch.rest.RestRequest.Method.POST; +import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastShardsHeader; + +/** + */ +public class RestFieldStatsAction extends BaseRestHandler { + + @Inject + public RestFieldStatsAction(Settings settings, RestController controller, Client client) { + super(settings, controller, client); + controller.registerHandler(GET, "/_field_stats", this); + controller.registerHandler(POST, "/_field_stats", this); + controller.registerHandler(GET, "/{index}/_field_stats", this); + controller.registerHandler(POST, "/{index}/_field_stats", this); + } + + @Override + public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) { + final FieldStatsRequest fieldStatsRequest = new FieldStatsRequest(); + fieldStatsRequest.indices(Strings.splitStringByCommaToArray(request.param("index"))); + fieldStatsRequest.indicesOptions(IndicesOptions.fromRequest(request, fieldStatsRequest.indicesOptions())); + fieldStatsRequest.fields(Strings.splitStringByCommaToArray(request.param("fields"))); + fieldStatsRequest.level(request.param("level", FieldStatsRequest.DEFAULT_LEVEL)); + fieldStatsRequest.listenerThreaded(false); + + client.fieldStats(fieldStatsRequest, new RestBuilderListener(channel) { + @Override + public RestResponse buildResponse(FieldStatsResponse response, XContentBuilder builder) throws Exception { + builder.startObject(); + buildBroadcastShardsHeader(builder, response); + + builder.startObject("indices"); + for (Map.Entry> entry1 : response.getIndicesMergedFieldStats().entrySet()) { + builder.startObject(entry1.getKey()); + builder.startObject("fields"); + for (Map.Entry entry2 : entry1.getValue().entrySet()) { + builder.field(entry2.getKey()); + entry2.getValue().toXContent(builder, request); + } + builder.endObject(); + builder.endObject(); + } + builder.endObject(); + return new BytesRestResponse(RestStatus.OK, builder); + } + }); + } +} \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java b/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java index e196d954a7c..d95ef3e9498 100644 --- a/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java +++ b/src/main/java/org/elasticsearch/rest/action/index/RestIndexAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.index; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.ActionWriteResponse; import org.elasticsearch.action.WriteConsistencyLevel; import org.elasticsearch.action.index.IndexRequest; @@ -88,7 +87,7 @@ public class RestIndexAction extends BaseRestHandler { if (sOpType != null) { try { indexRequest.opType(IndexRequest.OpType.fromString(sOpType)); - } catch 
(ElasticsearchIllegalArgumentException eia){ + } catch (IllegalArgumentException eia){ try { XContentBuilder builder = channel.newBuilder(); channel.sendResponse(new BytesRestResponse(BAD_REQUEST, builder.startObject().field("error", eia.getMessage()).endObject())); diff --git a/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java b/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java index aab5774af9a..d63a39ac555 100644 --- a/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java +++ b/src/main/java/org/elasticsearch/rest/action/script/RestPutIndexedScriptAction.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.rest.action.script; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequest; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptResponse; @@ -84,7 +83,7 @@ public class RestPutIndexedScriptAction extends BaseRestHandler { if (sOpType != null) { try { putRequest.opType(IndexRequest.OpType.fromString(sOpType)); - } catch (ElasticsearchIllegalArgumentException eia){ + } catch (IllegalArgumentException eia){ try { XContentBuilder builder = channel.newBuilder(); channel.sendResponse(new BytesRestResponse(BAD_REQUEST, builder.startObject().field("error", eia.getMessage()).endObject())); diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java index e7b41316db9..5efd2584b38 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestClearScrollAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.search; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.search.ClearScrollRequest; import org.elasticsearch.action.search.ClearScrollResponse; import org.elasticsearch.client.Client; @@ -27,7 +26,6 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; @@ -57,15 +55,15 @@ public class RestClearScrollAction extends BaseRestHandler { String scrollIds = request.param("scroll_id"); ClearScrollRequest clearRequest = new ClearScrollRequest(); clearRequest.setScrollIds(Arrays.asList(splitScrollIds(scrollIds))); - if (request.hasContent()) { - XContentType type = XContentFactory.xContentType(request.content()); + if (RestActions.hasBodyContent(request)) { + XContentType type = RestActions.guessBodyContentType(request); if (type == null) { scrollIds = RestActions.getRestContent(request).toUtf8(); clearRequest.setScrollIds(Arrays.asList(splitScrollIds(scrollIds))); } else { // NOTE: if rest request with xcontent body has request parameters, these parameters do not override xcontent values clearRequest.setScrollIds(null); - buildFromContent(request.content(), clearRequest); + buildFromContent(RestActions.getRestContent(request), clearRequest); } } @@ -79,10 +77,10 @@ public class RestClearScrollAction extends BaseRestHandler { return
Strings.splitStringByCommaToArray(scrollIds); } - public static void buildFromContent(BytesReference content, ClearScrollRequest clearScrollRequest) throws ElasticsearchIllegalArgumentException { + public static void buildFromContent(BytesReference content, ClearScrollRequest clearScrollRequest) { try (XContentParser parser = XContentHelper.createParser(content)) { if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("Malformed content, must start with an object"); + throw new IllegalArgumentException("Malformed content, must start with an object"); } else { XContentParser.Token token; String currentFieldName = null; @@ -92,17 +90,17 @@ public class RestClearScrollAction extends BaseRestHandler { } else if ("scroll_id".equals(currentFieldName) && token == XContentParser.Token.START_ARRAY) { while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { if (token.isValue() == false) { - throw new ElasticsearchIllegalArgumentException("scroll_id array element should only contain scroll_id"); + throw new IllegalArgumentException("scroll_id array element should only contain scroll_id"); } clearScrollRequest.addScrollId(parser.text()); } } else { - throw new ElasticsearchIllegalArgumentException("Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] "); + throw new IllegalArgumentException("Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] "); } } } } catch (IOException e) { - throw new ElasticsearchIllegalArgumentException("Failed to parse request body", e); + throw new IllegalArgumentException("Failed to parse request body", e); } } diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index a0e04a1af06..9c3e85a2e94 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.search; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -31,6 +30,7 @@ import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.rest.*; import org.elasticsearch.rest.action.exists.RestExistsAction; +import org.elasticsearch.rest.action.support.RestActions; import org.elasticsearch.rest.action.support.RestStatusToXContentListener; import org.elasticsearch.search.Scroll; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -87,20 +87,11 @@ public class RestSearchAction extends BaseRestHandler { // get the content, and put it in the body // add content/source as template if template flag is set boolean isTemplateRequest = request.path().endsWith("/template"); - if (request.hasContent()) { + if (RestActions.hasBodyContent(request)) { if (isTemplateRequest) { - searchRequest.templateSource(request.content()); + searchRequest.templateSource(RestActions.getRestContent(request)); } else { - searchRequest.source(request.content()); - } - } else { - String source = request.param("source"); - if (source != null) { - if (isTemplateRequest) { - searchRequest.templateSource(source); - } else { - 
searchRequest.source(source); - } + searchRequest.source(RestActions.getRestContent(request)); } } @@ -138,7 +129,7 @@ public class RestSearchAction extends BaseRestHandler { } else if ("AND".equals(defaultOperator)) { queryBuilder.defaultOperator(QueryStringQueryBuilder.Operator.AND); } else { - throw new ElasticsearchIllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]"); + throw new IllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]"); } } if (searchSourceBuilder == null) { @@ -187,7 +178,7 @@ public class RestSearchAction extends BaseRestHandler { int terminateAfter = request.paramAsInt("terminate_after", SearchContext.DEFAULT_TERMINATE_AFTER); if (terminateAfter < 0) { - throw new ElasticsearchIllegalArgumentException("terminateAfter must be > 0"); + throw new IllegalArgumentException("terminateAfter must be >= 0"); } else if (terminateAfter > 0) { searchSourceBuilder.terminateAfter(terminateAfter); } diff --git a/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java b/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java index cd2dbf856a3..15de56265bc 100644 --- a/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java +++ b/src/main/java/org/elasticsearch/rest/action/search/RestSearchScrollAction.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.search; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; import org.elasticsearch.client.Client; @@ -68,8 +67,8 @@ public class RestSearchScrollAction extends BaseRestHandler { searchScrollRequest.scroll(new Scroll(parseTimeValue(scroll, null))); } - if (request.hasContent()) { - XContentType type = XContentFactory.xContentType(request.content()); + if (RestActions.hasBodyContent(request)) { + XContentType type = XContentFactory.xContentType(RestActions.getRestContent(request)); if (type == null) { if (scrollId == null) { scrollId = RestActions.getRestContent(request).toUtf8(); @@ -77,16 +76,16 @@ } } else { // NOTE: if rest request with xcontent body has request parameters, these parameters override xcontent values - buildFromContent(request.content(), searchScrollRequest); + buildFromContent(RestActions.getRestContent(request), searchScrollRequest); } } client.searchScroll(searchScrollRequest, new RestStatusToXContentListener(channel)); } - public static void buildFromContent(BytesReference content, SearchScrollRequest searchScrollRequest) throws ElasticsearchIllegalArgumentException { + public static void buildFromContent(BytesReference content, SearchScrollRequest searchScrollRequest) { try (XContentParser parser = XContentHelper.createParser(content)) { if (parser.nextToken() != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("Malforrmed content, must start with an object"); + throw new IllegalArgumentException("Malformed content, must start with an object"); } else { XContentParser.Token token; String currentFieldName = null; @@ -98,13 +97,12 @@ } else if ("scroll".equals(currentFieldName) && token == XContentParser.Token.VALUE_STRING) { searchScrollRequest.scroll(new Scroll(TimeValue.parseTimeValue(parser.text(), null))); } else { - throw new
ElasticsearchIllegalArgumentException("Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] "); + throw new IllegalArgumentException("Unknown parameter [" + currentFieldName + "] in request body or parameter is of the wrong type[" + token + "] "); } } } } catch (IOException e) { - throw new ElasticsearchIllegalArgumentException("Failed to parse request body", e); + throw new IllegalArgumentException("Failed to parse request body", e); } } - } diff --git a/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java b/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java index 28b30efd893..9f9c3946b36 100644 --- a/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java +++ b/src/main/java/org/elasticsearch/rest/action/suggest/RestSuggestAction.java @@ -22,7 +22,6 @@ package org.elasticsearch.rest.action.suggest; import static org.elasticsearch.rest.RestRequest.Method.GET; import static org.elasticsearch.rest.RestRequest.Method.POST; import static org.elasticsearch.rest.action.support.RestActions.buildBroadcastShardsHeader; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.suggest.SuggestRequest; import org.elasticsearch.action.suggest.SuggestResponse; import org.elasticsearch.action.support.IndicesOptions; @@ -38,6 +37,7 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.rest.action.support.RestActions; import org.elasticsearch.rest.action.support.RestBuilderListener; import org.elasticsearch.search.suggest.Suggest; @@ -60,15 +60,10 @@ public class RestSuggestAction extends BaseRestHandler { SuggestRequest suggestRequest = new SuggestRequest(Strings.splitStringByCommaToArray(request.param("index"))); suggestRequest.indicesOptions(IndicesOptions.fromRequest(request, suggestRequest.indicesOptions())); suggestRequest.listenerThreaded(false); - if (request.hasContent()) { - suggestRequest.suggest(request.content()); + if (RestActions.hasBodyContent(request)) { + suggestRequest.suggest(RestActions.getRestContent(request)); } else { - String source = request.param("source"); - if (source != null) { - suggestRequest.suggest(source); - } else { - throw new ElasticsearchIllegalArgumentException("no content or source provided to execute suggestion"); - } + throw new IllegalArgumentException("no content or source provided to execute suggestion"); } suggestRequest.routing(request.param("routing")); suggestRequest.preference(request.param("preference")); diff --git a/src/main/java/org/elasticsearch/rest/action/support/RestActions.java b/src/main/java/org/elasticsearch/rest/action/support/RestActions.java index 5de5396eb9f..8bd59f1e8a9 100644 --- a/src/main/java/org/elasticsearch/rest/action/support/RestActions.java +++ b/src/main/java/org/elasticsearch/rest/action/support/RestActions.java @@ -19,7 +19,6 @@ package org.elasticsearch.rest.action.support; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.support.QuerySourceBuilder; import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse; @@ -28,6 +27,8 @@ import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.xcontent.XContentBuilder; 
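/*
 * A minimal sketch of the body RestSearchScrollAction.buildFromContent above parses;
 * both keys are optional and the scroll id value is illustrative.
 */
// Accepted body: { "scroll_id" : "DXF1ZXJ5QW5k", "scroll" : "1m" }
SearchScrollRequest scrollExample = new SearchScrollRequest();
RestSearchScrollAction.buildFromContent(new BytesArray("{\"scroll_id\": \"DXF1ZXJ5QW5k\", \"scroll\": \"1m\"}"), scrollExample);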
import org.elasticsearch.common.xcontent.XContentBuilderString; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryStringQueryBuilder; import org.elasticsearch.rest.RestRequest; @@ -104,7 +105,7 @@ public class RestActions { } else if ("AND".equals(defaultOperator)) { queryBuilder.defaultOperator(QueryStringQueryBuilder.Operator.AND); } else { - throw new ElasticsearchIllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]"); + throw new IllegalArgumentException("Unsupported defaultOperator [" + defaultOperator + "], can either be [OR] or [AND]"); } } return new QuerySourceBuilder().setQuery(queryBuilder); @@ -128,4 +129,24 @@ public class RestActions { return content; } + + /** + * guesses the content type from either payload or source parameter + * @param request Rest request + * @return rest content type or null if not applicable. + */ + public static XContentType guessBodyContentType(final RestRequest request) { + final BytesReference restContent = RestActions.getRestContent(request); + if (restContent == null) { + return null; + } + return XContentFactory.xContentType(restContent); + } + + /** + * Returns true if either payload or source parameter is present. Otherwise false + */ + public static boolean hasBodyContent(final RestRequest request) { + return request.hasContent() || request.hasParam("source"); + } } diff --git a/src/main/java/org/elasticsearch/rest/action/termvectors/RestTermVectorsAction.java b/src/main/java/org/elasticsearch/rest/action/termvectors/RestTermVectorsAction.java index f7e19304724..af81dfcd0a9 100644 --- a/src/main/java/org/elasticsearch/rest/action/termvectors/RestTermVectorsAction.java +++ b/src/main/java/org/elasticsearch/rest/action/termvectors/RestTermVectorsAction.java @@ -65,15 +65,9 @@ public class RestTermVectorsAction extends BaseRestHandler { @Override public void handleRequest(final RestRequest request, final RestChannel channel, final Client client) throws Exception { TermVectorsRequest termVectorsRequest = new TermVectorsRequest(request.param("index"), request.param("type"), request.param("id")); - XContentParser parser = null; - if (request.hasContent()) { - try { - parser = XContentFactory.xContent(request.content()).createParser(request.content()); + if (RestActions.hasBodyContent(request)) { + try (XContentParser parser = XContentFactory.xContent(RestActions.guessBodyContentType(request)).createParser(RestActions.getRestContent(request))){ TermVectorsRequest.parseRequest(termVectorsRequest, parser); - } finally { - if (parser != null) { - parser.close(); - } } } readURIParameters(termVectorsRequest, request); diff --git a/src/main/java/org/elasticsearch/river/RiversManager.java b/src/main/java/org/elasticsearch/river/RiversManager.java index 30d67d4cc2f..42d7021a5f3 100644 --- a/src/main/java/org/elasticsearch/river/RiversManager.java +++ b/src/main/java/org/elasticsearch/river/RiversManager.java @@ -46,21 +46,21 @@ public class RiversManager extends AbstractLifecycleComponent { } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { riversRouter.start(); riversService.start(); clusterService.start(); } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { riversRouter.stop(); clusterService.stop(); riversService.stop(); } @Override - protected void 
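/*
 * A minimal sketch of how a handler combines the new RestActions helpers added
 * above; `request` stands in for the RestRequest a handler receives.
 */
if (RestActions.hasBodyContent(request)) {
    // getRestContent falls back to the `source` parameter when there is no body
    BytesReference content = RestActions.getRestContent(request);
    // null means the content is not valid XContent, e.g. a bare scroll id
    XContentType type = RestActions.guessBodyContentType(request);
}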
doClose() throws ElasticsearchException { + protected void doClose() { riversRouter.close(); clusterService.close(); riversService.close(); diff --git a/src/main/java/org/elasticsearch/river/RiversService.java b/src/main/java/org/elasticsearch/river/RiversService.java index 0d7863a6468..ed7369d8ad0 100644 --- a/src/main/java/org/elasticsearch/river/RiversService.java +++ b/src/main/java/org/elasticsearch/river/RiversService.java @@ -88,11 +88,11 @@ public class RiversService extends AbstractLifecycleComponent { } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { ImmutableSet indices = ImmutableSet.copyOf(this.rivers.keySet()); final CountDownLatch latch = new CountDownLatch(indices.size()); for (final RiverName riverName : indices) { @@ -117,10 +117,10 @@ public class RiversService extends AbstractLifecycleComponent { } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { } - public synchronized void createRiver(RiverName riverName, Map settings) throws ElasticsearchException { + public synchronized void createRiver(RiverName riverName, Map settings) { if (riversInjectors.containsKey(riverName)) { logger.warn("ignoring river [{}][{}] creation, already exists", riverName.type(), riverName.name()); return; @@ -182,7 +182,7 @@ public class RiversService extends AbstractLifecycleComponent { } } - public synchronized void closeRiver(RiverName riverName) throws ElasticsearchException { + public synchronized void closeRiver(RiverName riverName) { Injector riverInjector; River river; synchronized (this) { diff --git a/src/main/java/org/elasticsearch/river/cluster/PublishRiverClusterStateAction.java b/src/main/java/org/elasticsearch/river/cluster/PublishRiverClusterStateAction.java index 0b39d50d4bf..d783adee752 100644 --- a/src/main/java/org/elasticsearch/river/cluster/PublishRiverClusterStateAction.java +++ b/src/main/java/org/elasticsearch/river/cluster/PublishRiverClusterStateAction.java @@ -38,7 +38,7 @@ public class PublishRiverClusterStateAction extends AbstractComponent { public static final String ACTION_NAME = "internal:river/state/publish"; - public static interface NewClusterStateListener { + public interface NewClusterStateListener { void onNewClusterState(RiverClusterState clusterState); } @@ -54,7 +54,7 @@ public class PublishRiverClusterStateAction extends AbstractComponent { this.transportService = transportService; this.clusterService = clusterService; this.listener = listener; - transportService.registerHandler(ACTION_NAME, new PublishClusterStateRequestHandler()); + transportService.registerRequestHandler(ACTION_NAME, PublishClusterStateRequest.class, ThreadPool.Names.SAME, new PublishClusterStateRequestHandler()); } public void close() { @@ -87,11 +87,11 @@ public class PublishRiverClusterStateAction extends AbstractComponent { } } - private class PublishClusterStateRequest extends TransportRequest { + static class PublishClusterStateRequest extends TransportRequest { private RiverClusterState clusterState; - private PublishClusterStateRequest() { + PublishClusterStateRequest() { } private PublishClusterStateRequest(RiverClusterState clusterState) { @@ -111,18 +111,7 @@ public class PublishRiverClusterStateAction extends AbstractComponent { } } - private class PublishClusterStateRequestHandler extends BaseTransportRequestHandler { - - @Override - public PublishClusterStateRequest 
newInstance() { - return new PublishClusterStateRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - + private class PublishClusterStateRequestHandler implements TransportRequestHandler { @Override public void messageReceived(PublishClusterStateRequest request, TransportChannel channel) throws Exception { listener.onNewClusterState(request.clusterState); diff --git a/src/main/java/org/elasticsearch/river/cluster/RiverClusterService.java b/src/main/java/org/elasticsearch/river/cluster/RiverClusterService.java index 49488c889f2..11a0350a40d 100644 --- a/src/main/java/org/elasticsearch/river/cluster/RiverClusterService.java +++ b/src/main/java/org/elasticsearch/river/cluster/RiverClusterService.java @@ -59,12 +59,12 @@ public class RiverClusterService extends AbstractLifecycleComponent imple } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { } @Override diff --git a/src/main/java/org/elasticsearch/script/CompiledScript.java b/src/main/java/org/elasticsearch/script/CompiledScript.java index 54cca316871..9e3bfaf3f4c 100644 --- a/src/main/java/org/elasticsearch/script/CompiledScript.java +++ b/src/main/java/org/elasticsearch/script/CompiledScript.java @@ -20,23 +20,35 @@ package org.elasticsearch.script; /** - * + * CompiledScript holds all the parameters necessary to execute a previously compiled script. */ public class CompiledScript { - private final String type; - + private final String lang; private final Object compiled; - public CompiledScript(String type, Object compiled) { - this.type = type; + /** + * Constructor for CompiledScript. + * @param lang The language of the script to be executed. + * @param compiled The compiled script Object that is executable. + */ + public CompiledScript(String lang, Object compiled) { + this.lang = lang; this.compiled = compiled; } + /** + * Method to get the language. + * @return The language of the script to be executed. + */ public String lang() { - return type; + return lang; } + /** + * Method to get the compiled script object. + * @return The compiled script Object that is executable. 
+ */ public Object compiled() { return compiled; } diff --git a/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java b/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java index 45b8ee26712..70bf27b82e4 100644 --- a/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java +++ b/src/main/java/org/elasticsearch/script/NativeScriptEngineService.java @@ -22,7 +22,6 @@ package org.elasticsearch.script; import com.google.common.collect.ImmutableMap; import org.apache.lucene.index.LeafReaderContext; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -68,7 +67,7 @@ public class NativeScriptEngineService extends AbstractComponent implements Scri if (scriptFactory != null) { return scriptFactory; } - throw new ElasticsearchIllegalArgumentException("Native script [" + script + "] not found"); + throw new IllegalArgumentException("Native script [" + script + "] not found"); } @Override @@ -108,4 +107,4 @@ public class NativeScriptEngineService extends AbstractComponent implements Scri public void scriptRemoved(CompiledScript script) { // Nothing to do here } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/script/Script.java b/src/main/java/org/elasticsearch/script/Script.java new file mode 100644 index 00000000000..655ff82c08e --- /dev/null +++ b/src/main/java/org/elasticsearch/script/Script.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.script; + +import java.util.Map; + + +import static org.elasticsearch.script.ScriptService.ScriptType; + +/** + * Script holds all the parameters necessary to compile or find in cache and then execute a script. + */ +public class Script { + + private final String lang; + private final String script; + private final ScriptType type; + private final Map params; + + /** + * Constructor for Script. + * @param lang The language of the script to be compiled/executed. + * @param script The cache key of the script to be compiled/executed. For dynamic scripts this is the actual + * script source code. For indexed scripts this is the id used in the request. For on disk scripts + * this is the file name. + * @param type The type of script -- dynamic, indexed, or file. + * @param params The map of parameters the script will be executed with. 
+ */ + public Script(String lang, String script, ScriptType type, Map params) { + if (script == null) { + throw new IllegalArgumentException("The parameter script (String) must not be null in Script."); + } + if (type == null) { + throw new IllegalArgumentException("The parameter type (ScriptType) must not be null in Script."); + } + + this.lang = lang; + this.script = script; + this.type = type; + this.params = params; + } + + /** + * Method for getting language. + * @return The language of the script to be compiled/executed. + */ + public String getLang() { + return lang; + } + + /** + * Method for getting the script. + * @return The cache key of the script to be compiled/executed. For dynamic scripts this is the actual + * script source code. For indexed scripts this is the id used in the request. For on disk scripts + * this is the file name. + */ + public String getScript() { + return script; + } + + /** + * Method for getting the type. + * @return The type of script -- dynamic, indexed, or file. + */ + public ScriptType getType() { + return type; + } + + /** + * Method for getting the parameters. + * @return The map of parameters the script will be executed with. + */ + public Map getParams() { + return params; + } +} diff --git a/src/main/java/org/elasticsearch/script/ScriptContext.java b/src/main/java/org/elasticsearch/script/ScriptContext.java index 18224e81483..a12fc85a53c 100644 --- a/src/main/java/org/elasticsearch/script/ScriptContext.java +++ b/src/main/java/org/elasticsearch/script/ScriptContext.java @@ -19,7 +19,6 @@ package org.elasticsearch.script; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Strings; /** @@ -75,10 +74,10 @@ public interface ScriptContext { */ public Plugin(String pluginName, String operation) { if (Strings.hasLength(pluginName) == false) { - throw new ElasticsearchIllegalArgumentException("plugin name cannot be empty when registering a custom script context"); + throw new IllegalArgumentException("plugin name cannot be empty when registering a custom script context"); } if (Strings.hasLength(operation) == false) { - throw new ElasticsearchIllegalArgumentException("operation name cannot be empty when registering a custom script context"); + throw new IllegalArgumentException("operation name cannot be empty when registering a custom script context"); } this.pluginName = pluginName; this.operation = operation; diff --git a/src/main/java/org/elasticsearch/script/ScriptContextRegistry.java b/src/main/java/org/elasticsearch/script/ScriptContextRegistry.java index 40596870b99..614e41e1c11 100644 --- a/src/main/java/org/elasticsearch/script/ScriptContextRegistry.java +++ b/src/main/java/org/elasticsearch/script/ScriptContextRegistry.java @@ -23,7 +23,6 @@ import com.google.common.collect.ImmutableCollection; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import java.util.Map; @@ -46,7 +45,7 @@ public final class ScriptContextRegistry { validateScriptContext(customScriptContext); ScriptContext previousContext = scriptContexts.put(customScriptContext.getKey(), customScriptContext); if (previousContext != null) { - throw new ElasticsearchIllegalArgumentException("script context [" + customScriptContext.getKey() + "] cannot be registered twice"); + throw new IllegalArgumentException("script context [" + customScriptContext.getKey() + "] cannot be registered twice"); 
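/*
 * Hypothetical usage of the new Script value object defined above; the
 * `scriptService` instance and the SEARCH standard context are stand-ins for
 * whatever the caller has at hand.
 */
Script script = new Script("groovy", "doc['price'].value * factor",
        ScriptType.INLINE, Collections.singletonMap("factor", 2));
CompiledScript compiled = scriptService.compile(script, ScriptContext.Standard.SEARCH);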
} } this.scriptContexts = ImmutableMap.copyOf(scriptContexts); @@ -69,10 +68,10 @@ public final class ScriptContextRegistry { //script contexts can be used in fine-grained settings, we need to be careful with what we allow here private void validateScriptContext(ScriptContext.Plugin scriptContext) { if (RESERVED_SCRIPT_CONTEXTS.contains(scriptContext.getPluginName())) { - throw new ElasticsearchIllegalArgumentException("[" + scriptContext.getPluginName() + "] is a reserved name, it cannot be registered as a custom script context"); + throw new IllegalArgumentException("[" + scriptContext.getPluginName() + "] is a reserved name, it cannot be registered as a custom script context"); } if (RESERVED_SCRIPT_CONTEXTS.contains(scriptContext.getOperation())) { - throw new ElasticsearchIllegalArgumentException("[" + scriptContext.getOperation() + "] is a reserved name, it cannot be registered as a custom script context"); + throw new IllegalArgumentException("[" + scriptContext.getOperation() + "] is a reserved name, it cannot be registered as a custom script context"); } } diff --git a/src/main/java/org/elasticsearch/script/ScriptMode.java b/src/main/java/org/elasticsearch/script/ScriptMode.java index 855690b794b..b35dda716e4 100644 --- a/src/main/java/org/elasticsearch/script/ScriptMode.java +++ b/src/main/java/org/elasticsearch/script/ScriptMode.java @@ -19,7 +19,6 @@ package org.elasticsearch.script; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Booleans; import java.util.Locale; @@ -45,7 +44,7 @@ enum ScriptMode { if (SANDBOX.toString().equals(input)) { return SANDBOX; } - throw new ElasticsearchIllegalArgumentException("script mode [" + input + "] not supported"); + throw new IllegalArgumentException("script mode [" + input + "] not supported"); } diff --git a/src/main/java/org/elasticsearch/script/ScriptModes.java b/src/main/java/org/elasticsearch/script/ScriptModes.java index 854a7ec2e40..7411e3a0592 100644 --- a/src/main/java/org/elasticsearch/script/ScriptModes.java +++ b/src/main/java/org/elasticsearch/script/ScriptModes.java @@ -21,7 +21,6 @@ package org.elasticsearch.script; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.script.ScriptService.ScriptType; @@ -159,7 +158,7 @@ public class ScriptModes { } ScriptMode scriptMode = scriptModes.get(ENGINE_SETTINGS_PREFIX + "." + lang + "." + scriptType + "." 
+ scriptContext.getKey()); if (scriptMode == null) { - throw new ElasticsearchIllegalArgumentException("script mode not found for lang [" + lang + "], script_type [" + scriptType + "], operation [" + scriptContext.getKey() + "]"); + throw new IllegalArgumentException("script mode not found for lang [" + lang + "], script_type [" + scriptType + "], operation [" + scriptContext.getKey() + "]"); } return scriptMode; } diff --git a/src/main/java/org/elasticsearch/script/ScriptModule.java b/src/main/java/org/elasticsearch/script/ScriptModule.java index a5a4707db04..2cc12e13d85 100644 --- a/src/main/java/org/elasticsearch/script/ScriptModule.java +++ b/src/main/java/org/elasticsearch/script/ScriptModule.java @@ -21,7 +21,6 @@ package org.elasticsearch.script; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.multibindings.MapBinder; import org.elasticsearch.common.inject.multibindings.Multibinder; @@ -82,7 +81,7 @@ public class ScriptModule extends AbstractModule { String name = entry.getKey(); Class type = entry.getValue().getAsClass("type", NativeScriptFactory.class); if (type == NativeScriptFactory.class) { - throw new ElasticsearchIllegalArgumentException("type is missing for native script [" + name + "]"); + throw new IllegalArgumentException("type is missing for native script [" + name + "]"); } scriptsBinder.addBinding(name).to(type).asEagerSingleton(); } diff --git a/src/main/java/org/elasticsearch/script/ScriptService.java b/src/main/java/org/elasticsearch/script/ScriptService.java index 3320dea795d..8e363068c30 100644 --- a/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/src/main/java/org/elasticsearch/script/ScriptService.java @@ -27,21 +27,17 @@ import com.google.common.cache.RemovalNotification; import com.google.common.collect.ImmutableMap; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.delete.DeleteRequest; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.indexedscripts.delete.DeleteIndexedScriptRequest; import org.elasticsearch.action.indexedscripts.get.GetIndexedScriptRequest; import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptRequest; import org.elasticsearch.client.Client; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -60,7 +56,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.Environment; import org.elasticsearch.index.query.TemplateQueryParser; -import org.elasticsearch.node.settings.NodeSettingsService; import org.elasticsearch.script.groovy.GroovyScriptEngineService; import org.elasticsearch.search.lookup.SearchLookup; import org.elasticsearch.watcher.FileChangesListener; @@ -74,7 +69,6 @@ import java.nio.file.Files; import 
java.nio.file.Path; import java.util.Locale; import java.util.Map; -import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; @@ -103,7 +97,6 @@ public class ScriptService extends AbstractComponent implements Closeable { private final Cache cache; private final Path scriptsDirectory; - private final FileWatcher fileWatcher; private final ScriptModes scriptModes; private final ScriptContextRegistry scriptContextRegistry; @@ -117,11 +110,11 @@ public class ScriptService extends AbstractComponent implements Closeable { @Inject public ScriptService(Settings settings, Environment env, Set scriptEngines, - ResourceWatcherService resourceWatcherService, NodeSettingsService nodeSettingsService, ScriptContextRegistry scriptContextRegistry) throws IOException { + ResourceWatcherService resourceWatcherService, ScriptContextRegistry scriptContextRegistry) throws IOException { super(settings); if (Strings.hasLength(settings.get(DISABLE_DYNAMIC_SCRIPTING_SETTING))) { - throw new ElasticsearchIllegalArgumentException(DISABLE_DYNAMIC_SCRIPTING_SETTING + " is not a supported setting, replace with fine-grained script settings. \n" + + throw new IllegalArgumentException(DISABLE_DYNAMIC_SCRIPTING_SETTING + " is not a supported setting, replace with fine-grained script settings. \n" + "Dynamic scripts can be enabled for all languages and all operations by replacing `script.disable_dynamic: false` with `script.inline: on` and `script.indexed: on` in elasticsearch.yml"); } @@ -162,7 +155,7 @@ public class ScriptService extends AbstractComponent implements Closeable { if (logger.isTraceEnabled()) { logger.trace("Using scripts directory [{}] ", scriptsDirectory); } - this.fileWatcher = new FileWatcher(scriptsDirectory); + FileWatcher fileWatcher = new FileWatcher(scriptsDirectory); fileWatcher.addListener(new ScriptChangesListener()); if (settings.getAsBoolean(SCRIPT_AUTO_RELOAD_ENABLED_SETTING, true)) { @@ -172,7 +165,6 @@ public class ScriptService extends AbstractComponent implements Closeable { // automatic reload is disable just load scripts once fileWatcher.init(); } - nodeSettingsService.addListener(new ApplySettings()); } //This isn't set in the ctor because doing so creates a guice circular @@ -186,25 +178,10 @@ public class ScriptService extends AbstractComponent implements Closeable { IOUtils.close(scriptEngines); } - /** - * Clear both the in memory and on disk compiled script caches. Files on - * disk will be treated as if they are new and recompiled. 
- * */ - public void clearCache() { - logger.debug("clearing script cache"); - // Clear the in-memory script caches - this.cache.invalidateAll(); - this.cache.cleanUp(); - // Clear the cache of on-disk scripts - this.staticCache.clear(); - // Clear the file watcher's state so it re-compiles on-disk scripts - this.fileWatcher.clearState(); - } - private ScriptEngineService getScriptEngineServiceForLang(String lang) { ScriptEngineService scriptEngineService = scriptEnginesByLang.get(lang); if (scriptEngineService == null) { - throw new ElasticsearchIllegalArgumentException("script_lang not supported [" + lang + "]"); + throw new IllegalArgumentException("script_lang not supported [" + lang + "]"); } return scriptEngineService; } @@ -212,7 +189,7 @@ public class ScriptService extends AbstractComponent implements Closeable { private ScriptEngineService getScriptEngineServiceForFileExt(String fileExtension) { ScriptEngineService scriptEngineService = scriptEnginesByExt.get(fileExtension); if (scriptEngineService == null) { - throw new ElasticsearchIllegalArgumentException("script file extension not supported [" + fileExtension + "]"); + throw new IllegalArgumentException("script file extension not supported [" + fileExtension + "]"); } return scriptEngineService; } @@ -220,57 +197,67 @@ public class ScriptService extends AbstractComponent implements Closeable { /** * Checks if a script can be executed and compiles it if needed, or returns the previously compiled and cached script. */ - public CompiledScript compile(String lang, String script, ScriptType scriptType, ScriptContext scriptContext) { - assert script != null; - assert scriptType != null; - assert scriptContext != null; + public CompiledScript compile(Script script, ScriptContext scriptContext) { + if (script == null) { + throw new IllegalArgumentException("The parameter script (Script) must not be null."); + } + if (scriptContext == null) { + throw new IllegalArgumentException("The parameter scriptContext (ScriptContext) must not be null."); + } + + String lang = script.getLang(); if (lang == null) { lang = defaultLang; } ScriptEngineService scriptEngineService = getScriptEngineServiceForLang(lang); - if (canExecuteScript(lang, scriptEngineService, scriptType, scriptContext) == false) { - throw new ScriptException("scripts of type [" + scriptType + "], operation [" + scriptContext.getKey() + "] and lang [" + lang + "] are disabled"); + if (canExecuteScript(lang, scriptEngineService, script.getType(), scriptContext) == false) { + throw new ScriptException("scripts of type [" + script.getType() + "], operation [" + scriptContext.getKey() + "] and lang [" + lang + "] are disabled"); } - return compileInternal(lang, script, scriptType); + return compileInternal(script); } /** * Compiles a script straight-away, or returns the previously compiled and cached script, without checking if it can be executed based on settings. 
*/ - public CompiledScript compileInternal(String lang, final String scriptOrId, final ScriptType scriptType) { - assert scriptOrId != null; - assert scriptType != null; + public CompiledScript compileInternal(Script script) { + if (script == null) { + throw new IllegalArgumentException("The parameter script (Script) must not be null."); + } + + String lang = script.getLang(); + if (lang == null) { lang = defaultLang; } if (logger.isTraceEnabled()) { - logger.trace("Compiling lang: [{}] type: [{}] script: {}", lang, scriptType, scriptOrId); + logger.trace("Compiling lang: [{}] type: [{}] script: {}", lang, script.getType(), script.getScript()); } ScriptEngineService scriptEngineService = getScriptEngineServiceForLang(lang); - CacheKey cacheKey = newCacheKey(scriptEngineService, scriptOrId); + CacheKey cacheKey = newCacheKey(scriptEngineService, script.getScript()); - if (scriptType == ScriptType.FILE) { + if (script.getType() == ScriptType.FILE) { CompiledScript compiled = staticCache.get(cacheKey); //On disk scripts will be loaded into the staticCache by the listener if (compiled == null) { - throw new ElasticsearchIllegalArgumentException("Unable to find on disk script " + scriptOrId); + throw new IllegalArgumentException("Unable to find on disk script " + script.getScript()); } return compiled; } - String script = scriptOrId; - if (scriptType == ScriptType.INDEXED) { - final IndexedScript indexedScript = new IndexedScript(lang, scriptOrId); - script = getScriptFromIndex(indexedScript.lang, indexedScript.id); - cacheKey = newCacheKey(scriptEngineService, script); + String code = script.getScript(); + + if (script.getType() == ScriptType.INDEXED) { + final IndexedScript indexedScript = new IndexedScript(lang, script.getScript()); + code = getScriptFromIndex(indexedScript.lang, indexedScript.id); + cacheKey = newCacheKey(scriptEngineService, code); } CompiledScript compiled = cache.getIfPresent(cacheKey); if (compiled == null) { //Either an un-cached inline script or an indexed script - compiled = new CompiledScript(lang, scriptEngineService.compile(script)); + compiled = new CompiledScript(lang, scriptEngineService.compile(code)); //Since the cache key is the script content itself we don't need to //invalidate/check the cache if an indexed script changes. 
cache.put(cacheKey, compiled); @@ -290,14 +277,14 @@ public class ScriptService extends AbstractComponent implements Closeable { if (scriptLang == null) { scriptLang = defaultLang; } else if (scriptEnginesByLang.containsKey(scriptLang) == false) { - throw new ElasticsearchIllegalArgumentException("script_lang not supported [" + scriptLang + "]"); + throw new IllegalArgumentException("script_lang not supported [" + scriptLang + "]"); } return scriptLang; } String getScriptFromIndex(String scriptLang, String id) { if (client == null) { - throw new ElasticsearchIllegalArgumentException("Got an indexed script with no Client registered."); + throw new IllegalArgumentException("Got an indexed script with no Client registered."); } scriptLang = validateScriptLanguage(scriptLang); GetRequest getRequest = new GetRequest(SCRIPT_INDEX, scriptLang, id); @@ -305,7 +292,7 @@ public class ScriptService extends AbstractComponent implements Closeable { if (responseFields.isExists()) { return getScriptFromResponse(responseFields); } - throw new ElasticsearchIllegalArgumentException("Unable to find script [" + SCRIPT_INDEX + "/" + throw new IllegalArgumentException("Unable to find script [" + SCRIPT_INDEX + "/" + scriptLang + "/" + id + "]"); } @@ -320,23 +307,23 @@ public class ScriptService extends AbstractComponent implements Closeable { //we don't know yet what the script will be used for, but if all of the operations for this lang with //indexed scripts are disabled, it makes no sense to even compile it and cache it. if (isAnyScriptContextEnabled(scriptLang, getScriptEngineServiceForLang(scriptLang), ScriptType.INDEXED)) { - CompiledScript compiledScript = compileInternal(scriptLang, context.template(), ScriptType.INLINE); + CompiledScript compiledScript = compileInternal(new Script(scriptLang, context.template(), ScriptType.INLINE, null)); if (compiledScript == null) { - throw new ElasticsearchIllegalArgumentException("Unable to parse [" + context.template() + + throw new IllegalArgumentException("Unable to parse [" + context.template() + "] lang [" + scriptLang + "] (ScriptService.compile returned null)"); } } else { logger.warn("skipping compile of script [{}], lang [{}] as all scripted operations are disabled for indexed scripts", context.template(), scriptLang); } } catch (Exception e) { - throw new ElasticsearchIllegalArgumentException("Unable to parse [" + context.template() + + throw new IllegalArgumentException("Unable to parse [" + context.template() + "] lang [" + scriptLang + "]", e); } } else { - throw new ElasticsearchIllegalArgumentException("Unable to find script in : " + scriptBytes.toUtf8()); + throw new IllegalArgumentException("Unable to find script in : " + scriptBytes.toUtf8()); } } catch (IOException e) { - throw new ElasticsearchIllegalArgumentException("failed to parse template script", e); + throw new IllegalArgumentException("failed to parse template script", e); } } @@ -372,7 +359,7 @@ public class ScriptService extends AbstractComponent implements Closeable { return template.toString(); } } catch (IOException | ClassCastException e) { - throw new ElasticsearchIllegalStateException("Unable to parse " + responseFields.getSourceAsString() + " as json", e); + throw new IllegalStateException("Unable to parse " + responseFields.getSourceAsString() + " as json", e); } } else if (source.containsKey("script")) { return source.get("script").toString(); @@ -382,7 +369,7 @@ public class ScriptService extends AbstractComponent implements Closeable { 
builder.map(responseFields.getSource()); return builder.string(); } catch (IOException|ClassCastException e) { - throw new ElasticsearchIllegalStateException("Unable to parse " + responseFields.getSourceAsString() + " as json", e); + throw new IllegalStateException("Unable to parse " + responseFields.getSourceAsString() + " as json", e); } } } @@ -390,8 +377,8 @@ public class ScriptService extends AbstractComponent implements Closeable { /** * Compiles (or retrieves from cache) and executes the provided script */ - public ExecutableScript executable(String lang, String script, ScriptType scriptType, ScriptContext scriptContext, Map vars) { - return executable(compile(lang, script, scriptType, scriptContext), vars); + public ExecutableScript executable(Script script, ScriptContext scriptContext) { + return executable(compile(script, scriptContext), script.getParams()); } /** @@ -404,9 +391,9 @@ public class ScriptService extends AbstractComponent implements Closeable { /** * Compiles (or retrieves from cache) and executes the provided search script */ - public SearchScript search(SearchLookup lookup, String lang, String script, ScriptType scriptType, ScriptContext scriptContext, @Nullable Map vars) { - CompiledScript compiledScript = compile(lang, script, scriptType, scriptContext); - return getScriptEngineServiceForLang(compiledScript.lang()).search(compiledScript.compiled(), lookup, vars); + public SearchScript search(SearchLookup lookup, Script script, ScriptContext scriptContext) { + CompiledScript compiledScript = compile(script, scriptContext); + return getScriptEngineServiceForLang(compiledScript.lang()).search(compiledScript.compiled(), lookup, script.getParams()); } private boolean isAnyScriptContextEnabled(String lang, ScriptEngineService scriptEngineService, ScriptType scriptType) { @@ -421,7 +408,7 @@ public class ScriptService extends AbstractComponent implements Closeable { private boolean canExecuteScript(String lang, ScriptEngineService scriptEngineService, ScriptType scriptType, ScriptContext scriptContext) { assert lang != null; if (scriptContextRegistry.isSupportedContext(scriptContext) == false) { - throw new ElasticsearchIllegalArgumentException("script context [" + scriptContext.getKey() + "] not supported"); + throw new IllegalArgumentException("script context [" + scriptContext.getKey() + "] not supported"); } ScriptMode mode = scriptModes.getScriptMode(lang, scriptType, scriptContext); switch (mode) { @@ -432,7 +419,7 @@ public class ScriptService extends AbstractComponent implements Closeable { case SANDBOX: return scriptEngineService.sandboxed(); default: - throw new ElasticsearchIllegalArgumentException("script mode [" + mode + "] not supported"); + throw new IllegalArgumentException("script mode [" + mode + "] not supported"); } } @@ -554,7 +541,7 @@ public class ScriptService extends AbstractComponent implements Closeable { case FILE_VAL: return FILE; default: - throw new ElasticsearchIllegalArgumentException("Unexpected value read for ScriptType got [" + scriptTypeVal + + throw new IllegalArgumentException("Unexpected value read for ScriptType got [" + scriptTypeVal + "] expected one of [" + INLINE_VAL + "," + INDEXED_VAL + "," + FILE_VAL + "]"); } } @@ -572,7 +559,7 @@ public class ScriptService extends AbstractComponent implements Closeable { out.writeVInt(FILE_VAL); return; default: - throw new ElasticsearchIllegalStateException("Unknown ScriptType " + scriptType); + throw new IllegalStateException("Unknown ScriptType " + scriptType); } } else { 
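/*
 * Sketch of the refactored execution API above: parameters now travel inside the
 * Script object instead of a separate vars argument. The names and the UPDATE
 * context are illustrative; a null lang falls back to the default language.
 */
Script doubler = new Script(null, "value * 2", ScriptType.INLINE, Collections.singletonMap("value", 21));
ExecutableScript executable = scriptService.executable(doubler, ScriptContext.Standard.UPDATE);
Object result = executable.run();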
out.writeVInt(INLINE_VAL); //Default to inline @@ -624,34 +611,15 @@ public class ScriptService extends AbstractComponent implements Closeable { this.id = script; } else { if (parts.length != 3) { - throw new ElasticsearchIllegalArgumentException("Illegal index script format [" + script + "]" + + " should be /lang/id"); + throw new IllegalArgumentException("Illegal index script format [" + script + "]" + + " should be /lang/id"); } else { if (!parts[1].equals(this.lang)) { - throw new ElasticsearchIllegalStateException("Conflicting script language, found [" + parts[1] + "] expected + ["+ this.lang + "]"); + throw new IllegalStateException("Conflicting script language, found [" + parts[1] + "] expected ["+ this.lang + "]"); } this.id = parts[2]; } } } } - - private class ApplySettings implements NodeSettingsService.Listener { - @Override - public void onRefreshSettings(Settings settings) { - GroovyScriptEngineService engine = (GroovyScriptEngineService) ScriptService.this.scriptEnginesByLang.get(GroovyScriptEngineService.NAME); - if (engine != null) { - String[] patches = settings.getAsArray(GroovyScriptEngineService.GROOVY_SCRIPT_BLACKLIST_PATCH, Strings.EMPTY_ARRAY); - boolean blacklistChanged = engine.addToBlacklist(patches); - if (blacklistChanged) { - logger.info("adding {} to [{}], new blacklisted methods: {}", patches, - GroovyScriptEngineService.GROOVY_SCRIPT_BLACKLIST_PATCH, engine.blacklistAdditions()); - engine.reloadConfig(); - // Because the GroovyScriptEngineService knows nothing about the - // cache, we need to clear it here if the setting changes - ScriptService.this.clearCache(); - } - } - } - } } diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownAction.java b/src/main/java/org/elasticsearch/script/expression/DateMethodFunctionValues.java similarity index 50% rename from src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownAction.java rename to src/main/java/org/elasticsearch/script/expression/DateMethodFunctionValues.java index 8906d658c47..64eed0741bc 100644 --- a/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesShutdownAction.java +++ b/src/main/java/org/elasticsearch/script/expression/DateMethodFunctionValues.java @@ -17,29 +17,30 @@ * under the License.
*/ -package org.elasticsearch.action.admin.cluster.node.shutdown; +package org.elasticsearch.script.expression; -import org.elasticsearch.action.admin.cluster.ClusterAction; -import org.elasticsearch.client.ClusterAdminClient; +import java.util.Calendar; +import java.util.Locale; +import java.util.TimeZone; -/** - */ -public class NodesShutdownAction extends ClusterAction { +import org.apache.lucene.queries.function.ValueSource; +import org.elasticsearch.index.fielddata.AtomicNumericFieldData; - public static final NodesShutdownAction INSTANCE = new NodesShutdownAction(); - public static final String NAME = "cluster:admin/nodes/shutdown"; +class DateMethodFunctionValues extends FieldDataFunctionValues { + private final int calendarType; + private final Calendar calendar; - private NodesShutdownAction() { - super(NAME); + DateMethodFunctionValues(ValueSource parent, AtomicNumericFieldData data, int calendarType) { + super(parent, data); + + this.calendarType = calendarType; + calendar = Calendar.getInstance(TimeZone.getTimeZone("GMT"), Locale.ROOT); } @Override - public NodesShutdownResponse newResponse() { - return new NodesShutdownResponse(); - } - - @Override - public NodesShutdownRequestBuilder newRequestBuilder(ClusterAdminClient client) { - return new NodesShutdownRequestBuilder(client); + public double doubleVal(int docId) { + long millis = (long)dataAccessor.get(docId); + calendar.setTimeInMillis(millis); + return calendar.get(calendarType); } } diff --git a/src/main/java/org/elasticsearch/script/expression/DateMethodValueSource.java b/src/main/java/org/elasticsearch/script/expression/DateMethodValueSource.java new file mode 100644 index 00000000000..a157790e2bb --- /dev/null +++ b/src/main/java/org/elasticsearch/script/expression/DateMethodValueSource.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
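/*
 * What DateMethodFunctionValues.doubleVal above effectively computes for a
 * stored millis value, shown standalone; the timestamp is an arbitrary example.
 */
Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("GMT"), Locale.ROOT);
calendar.setTimeInMillis(1430000000000L); // 2015-04-25T22:13:20Z
int year = calendar.get(Calendar.YEAR);   // 2015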
+ */ + +package org.elasticsearch.script.expression; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; + +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.queries.function.FunctionValues; + +import org.elasticsearch.index.fielddata.AtomicFieldData; +import org.elasticsearch.index.fielddata.AtomicNumericFieldData; +import org.elasticsearch.index.fielddata.IndexFieldData; + +class DateMethodValueSource extends FieldDataValueSource { + + protected final String methodName; + protected final int calendarType; + + DateMethodValueSource(IndexFieldData indexFieldData, String methodName, int calendarType) { + super(indexFieldData); + + Objects.requireNonNull(methodName); + + this.methodName = methodName; + this.calendarType = calendarType; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + if (!super.equals(o)) return false; + + DateMethodValueSource that = (DateMethodValueSource) o; + + if (calendarType != that.calendarType) return false; + return methodName.equals(that.methodName); + + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + methodName.hashCode(); + result = 31 * result + calendarType; + return result; + } + + @Override + public FunctionValues getValues(Map context, LeafReaderContext leaf) throws IOException { + AtomicFieldData leafData = fieldData.load(leaf); + assert(leafData instanceof AtomicNumericFieldData); + + return new DateMethodFunctionValues(this, (AtomicNumericFieldData)leafData, calendarType); + } + + @Override + public String description() { + return methodName + ": field(" + fieldData.getFieldNames().toString() + ")"; + } +} diff --git a/src/main/java/org/elasticsearch/script/expression/ExpressionScript.java b/src/main/java/org/elasticsearch/script/expression/ExpressionScript.java index d7f43222ec2..9f4067a5997 100644 --- a/src/main/java/org/elasticsearch/script/expression/ExpressionScript.java +++ b/src/main/java/org/elasticsearch/script/expression/ExpressionScript.java @@ -26,7 +26,6 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.function.FunctionValues; import org.apache.lucene.queries.function.ValueSource; import org.apache.lucene.search.Scorer; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.script.SearchScript; @@ -92,7 +91,7 @@ class ExpressionScript implements SearchScript { // We have a new binding for the scorer so we need to reset the values values = source.getValues(Collections.singletonMap("scorer", scorer), leaf); } catch (IOException e) { - throw new ElasticsearchIllegalStateException("Can't get values", e); + throw new IllegalStateException("Can't get values", e); } } diff --git a/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java b/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java index 23841942104..6d6f986432b 100644 --- a/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java +++ b/src/main/java/org/elasticsearch/script/expression/ExpressionScriptEngineService.java @@ -23,6 +23,7 @@ import org.apache.lucene.expressions.Expression; import org.apache.lucene.expressions.SimpleBindings; import org.apache.lucene.expressions.js.JavascriptCompiler; import 
org.apache.lucene.expressions.js.VariableContext; +import org.apache.lucene.queries.function.ValueSource; import org.apache.lucene.queries.function.valuesource.DoubleConstValueSource; import org.apache.lucene.search.SortField; import org.elasticsearch.common.Nullable; @@ -32,6 +33,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.core.DateFieldMapper; import org.elasticsearch.index.mapper.core.NumberFieldMapper; import org.elasticsearch.script.CompiledScript; import org.elasticsearch.script.ExecutableScript; @@ -40,6 +42,7 @@ import org.elasticsearch.script.SearchScript; import org.elasticsearch.search.lookup.SearchLookup; import java.text.ParseException; +import java.util.Calendar; import java.util.Map; /** @@ -50,6 +53,13 @@ public class ExpressionScriptEngineService extends AbstractComponent implements public static final String NAME = "expression"; + protected static final String GET_YEAR_METHOD = "getYear"; + protected static final String GET_MONTH_METHOD = "getMonth"; + protected static final String GET_DAY_OF_MONTH_METHOD = "getDayOfMonth"; + protected static final String GET_HOUR_OF_DAY_METHOD = "getHourOfDay"; + protected static final String GET_MINUTES_METHOD = "getMinutes"; + protected static final String GET_SECONDS_METHOD = "getSeconds"; + @Inject public ExpressionScriptEngineService(Settings settings) { super(settings); @@ -112,19 +122,30 @@ public class ExpressionScriptEngineService extends AbstractComponent implements } } else { + String fieldname = null; + String methodname = null; VariableContext[] parts = VariableContext.parse(variable); if (parts[0].text.equals("doc") == false) { throw new ExpressionScriptCompilationException("Unknown variable [" + parts[0].text + "] in expression"); } if (parts.length < 2 || parts[1].type != VariableContext.Type.STR_INDEX) { - throw new ExpressionScriptCompilationException("Variable 'doc' in expression must be used with a specific field like: doc['myfield'].value"); + throw new ExpressionScriptCompilationException("Variable 'doc' in expression must be used with a specific field like: doc['myfield']"); + } else { + fieldname = parts[1].text; } - if (parts.length < 3 || parts[2].type != VariableContext.Type.MEMBER || parts[2].text.equals("value") == false) { - throw new ExpressionScriptCompilationException("Invalid member for field data in expression. Only '.value' is currently supported."); + if (parts.length == 3) { + if (parts[2].type == VariableContext.Type.METHOD) { + methodname = parts[2].text; + } else if (parts[2].type != VariableContext.Type.MEMBER || !"value".equals(parts[2].text)) { + throw new ExpressionScriptCompilationException("Only the member variable [value] or member methods may be accessed on a field when not accessing the field directly"); + } + } + if (parts.length > 3) { + throw new ExpressionScriptCompilationException("Variable [" + variable + "] does not follow an allowed format of either doc['field'] or doc['field'].method()"); } - String fieldname = parts[1].text; FieldMapper field = mapper.smartNameFieldMapper(fieldname); + if (field == null) { throw new ExpressionScriptCompilationException("Field [" + fieldname + "] used in expression does not exist in mappings"); } @@ -132,14 +153,46 @@ public class ExpressionScriptEngineService extends AbstractComponent implements // TODO: more context (which expression?) 
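/*
 * Illustrative scripts the relaxed variable parsing above now accepts;
 * `price` (numeric) and `timestamp` (date) are assumed field names.
 */
Script plain = new Script("expression", "doc['price']", ScriptType.INLINE, null);
Script dated = new Script("expression", "doc['timestamp'].getYear()", ScriptType.INLINE, null);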
throw new ExpressionScriptCompilationException("Field [" + fieldname + "] used in expression must be numeric"); } + IndexFieldData fieldData = lookup.doc().fieldDataService().getForField((NumberFieldMapper)field); - bindings.add(variable, new FieldDataValueSource(fieldData)); + if (methodname == null) { + bindings.add(variable, new FieldDataValueSource(fieldData)); + } else { + bindings.add(variable, getMethodValueSource(field, fieldData, fieldname, methodname)); + } } } return new ExpressionScript((Expression)compiledScript, bindings, specialValue); } + protected ValueSource getMethodValueSource(FieldMapper field, IndexFieldData fieldData, String fieldName, String methodName) { + switch (methodName) { + case GET_YEAR_METHOD: + return getDateMethodValueSource(field, fieldData, fieldName, methodName, Calendar.YEAR); + case GET_MONTH_METHOD: + return getDateMethodValueSource(field, fieldData, fieldName, methodName, Calendar.MONTH); + case GET_DAY_OF_MONTH_METHOD: + return getDateMethodValueSource(field, fieldData, fieldName, methodName, Calendar.DAY_OF_MONTH); + case GET_HOUR_OF_DAY_METHOD: + return getDateMethodValueSource(field, fieldData, fieldName, methodName, Calendar.HOUR_OF_DAY); + case GET_MINUTES_METHOD: + return getDateMethodValueSource(field, fieldData, fieldName, methodName, Calendar.MINUTE); + case GET_SECONDS_METHOD: + return getDateMethodValueSource(field, fieldData, fieldName, methodName, Calendar.SECOND); + default: + throw new IllegalArgumentException("Member method [" + methodName + "] does not exist."); + } + } + + protected ValueSource getDateMethodValueSource(FieldMapper field, IndexFieldData fieldData, String fieldName, String methodName, int calendarType) { + if (!(field instanceof DateFieldMapper)) { + throw new IllegalArgumentException("Member method [" + methodName + "] can only be used with a date field type, not the field [" + fieldName + "]."); + } + + return new DateMethodValueSource(fieldData, methodName, calendarType); + } + @Override public ExecutableScript executable(Object compiledScript, @Nullable Map vars) { throw new UnsupportedOperationException("Cannot use expressions for updates"); diff --git a/src/main/java/org/elasticsearch/script/expression/FieldDataValueSource.java b/src/main/java/org/elasticsearch/script/expression/FieldDataValueSource.java index 16e3d35bb61..7a97532068a 100644 --- a/src/main/java/org/elasticsearch/script/expression/FieldDataValueSource.java +++ b/src/main/java/org/elasticsearch/script/expression/FieldDataValueSource.java @@ -19,7 +19,6 @@ package org.elasticsearch.script.expression; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.queries.function.FunctionValues; import org.apache.lucene.queries.function.ValueSource; @@ -29,15 +28,18 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import java.io.IOException; import java.util.Map; +import java.util.Objects; /** * A {@link ValueSource} wrapper for field data. 
*/ class FieldDataValueSource extends ValueSource { - IndexFieldData fieldData; + protected IndexFieldData fieldData; + + protected FieldDataValueSource(IndexFieldData d) { + Objects.requireNonNull(d); - FieldDataValueSource(IndexFieldData d) { fieldData = d; } @@ -49,8 +51,13 @@ class FieldDataValueSource extends ValueSource { } @Override - public boolean equals(Object other) { - return fieldData.equals(other); + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + FieldDataValueSource that = (FieldDataValueSource) o; + + return fieldData.equals(that.fieldData); } @Override diff --git a/src/main/java/org/elasticsearch/script/groovy/GroovySandboxExpressionChecker.java b/src/main/java/org/elasticsearch/script/groovy/GroovySandboxExpressionChecker.java deleted file mode 100644 index aa7ca8db65c..00000000000 --- a/src/main/java/org/elasticsearch/script/groovy/GroovySandboxExpressionChecker.java +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.script.groovy; - -import com.google.common.collect.ImmutableSet; -import org.codehaus.groovy.ast.ClassNode; -import org.codehaus.groovy.ast.expr.*; -import org.codehaus.groovy.control.customizers.SecureASTCustomizer; -import org.elasticsearch.common.settings.Settings; - -import java.util.ArrayList; -import java.util.List; -import java.util.Set; - -import static com.google.common.collect.Lists.newArrayList; - -/** - * Class used to determine whether a Groovy expression should be allowed. - * During compilation, every expression is passed to the - * isAuthorized method, which returns true to allow that method - * and false to block it. Includes all of the sandbox-related whitelist and - * blacklist options. 
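// [Editor's note, referring to the FieldDataValueSource.equals() rewrite above] The
// old implementation compared the wrapped field data against the *other value
// source*, so two wrappers around the same field data never compared equal. When
// equals() is fixed like this, hashCode() must stay consistent with it; a minimal
// stand-in class (not the real ES types) demonstrating the restored contract:
import java.util.Objects;

public class WrapperEqualityDemo {
    static final class Wrapper {
        final Object fieldData;
        Wrapper(Object fieldData) { this.fieldData = Objects.requireNonNull(fieldData); }
        @Override public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            return fieldData.equals(((Wrapper) o).fieldData);
        }
        @Override public int hashCode() { return fieldData.hashCode(); }
    }

    public static void main(String[] args) {
        Wrapper a = new Wrapper("price");
        Wrapper b = new Wrapper("price");
        System.out.println(a.equals(b) && a.hashCode() == b.hashCode()); // true
    }
}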
- */ -public class GroovySandboxExpressionChecker implements SecureASTCustomizer.ExpressionChecker { - - public static String GROOVY_SANDBOX_METHOD_BLACKLIST = "script.groovy.sandbox.method_blacklist"; - public static String GROOVY_SANDBOX_PACKAGE_WHITELIST = "script.groovy.sandbox.package_whitelist"; - public static String GROOVY_SANDBOX_CLASS_WHITELIST = "script.groovy.sandbox.class_whitelist"; - public static String GROOVY_SCRIPT_SANDBOX_RECEIVER_WHITELIST = "script.groovy.sandbox.receiver_whitelist"; - - private final Set methodBlacklist; - private final Set additionalMethodBlacklist; - private final Set packageWhitelist; - private final Set classWhitelist; - - public GroovySandboxExpressionChecker(Settings settings, Set blacklistAdditions) { - this.methodBlacklist = ImmutableSet.copyOf(settings.getAsArray(GROOVY_SANDBOX_METHOD_BLACKLIST, defaultMethodBlacklist, true)); - this.additionalMethodBlacklist = ImmutableSet.copyOf(blacklistAdditions); - this.packageWhitelist = ImmutableSet.copyOf(settings.getAsArray(GROOVY_SANDBOX_PACKAGE_WHITELIST, defaultPackageWhitelist, true)); - this.classWhitelist = ImmutableSet.copyOf(settings.getAsArray(GROOVY_SANDBOX_CLASS_WHITELIST, defaultClassConstructionWhitelist, true)); - } - - // Never allow calling these methods, regardless of the object type - public static String[] defaultMethodBlacklist = new String[]{ - "getClass", - "class", - "forName", - "wait", - "notify", - "notifyAll", - "invokeMethod", - "finalize" - }; - - // Only instances of these classes in these packages can be instantiated - public static String[] defaultPackageWhitelist = new String[] {"java.util", "java.lang", "org.joda.time"}; - - // Classes that are allowed to be constructed - public static String[] defaultClassConstructionWhitelist = new String[]{ - java.util.Date.class.getName(), - java.util.Map.class.getName(), - java.util.List.class.getName(), - java.util.Set.class.getName(), - java.util.ArrayList.class.getName(), - java.util.Arrays.class.getName(), - java.util.HashMap.class.getName(), - java.util.HashSet.class.getName(), - java.util.UUID.class.getName(), - java.math.BigDecimal.class.getName(), - org.joda.time.DateTime.class.getName(), - org.joda.time.DateTimeZone.class.getName() - }; - - // Default whitelisted receiver classes for the Groovy sandbox - private final static String[] defaultReceiverWhitelist = new String [] { - groovy.util.GroovyCollections.class.getName(), - java.lang.Math.class.getName(), - java.lang.Integer.class.getName(), "[I", "[[I", "[[[I", - java.lang.Float.class.getName(), "[F", "[[F", "[[[F", - java.lang.Double.class.getName(), "[D", "[[D", "[[[D", - java.lang.Long.class.getName(), "[J", "[[J", "[[[J", - java.lang.Short.class.getName(), "[S", "[[S", "[[[S", - java.lang.Character.class.getName(), "[C", "[[C", "[[[C", - java.lang.Byte.class.getName(), "[B", "[[B", "[[[B", - java.lang.Boolean.class.getName(), "[Z", "[[Z", "[[[Z", - java.math.BigDecimal.class.getName(), - java.util.Arrays.class.getName(), - java.util.Date.class.getName(), - java.util.List.class.getName(), - java.util.Map.class.getName(), - java.util.Set.class.getName(), - java.lang.Object.class.getName(), - org.joda.time.DateTime.class.getName(), - org.joda.time.DateTimeUtils.class.getName(), - org.joda.time.DateTimeZone.class.getName(), - org.joda.time.Instant.class.getName() - }; - - /** - * Checks whether the expression to be compiled is allowed - */ - @Override - public boolean isAuthorized(Expression expression) { - if (expression instanceof MethodPointerExpression) { - 
return false; - } else if (expression instanceof MethodCallExpression) { - MethodCallExpression mce = (MethodCallExpression) expression; - String methodName = mce.getMethodAsString(); - if (methodBlacklist.contains(methodName)) { - return false; - } else if (additionalMethodBlacklist.contains(methodName)) { - return false; - } else if (methodName == null && mce.getMethod() instanceof GStringExpression) { - // We do not allow GStrings for method invocation, they are a security risk - return false; - } - } else if (expression instanceof ConstructorCallExpression) { - ConstructorCallExpression cce = (ConstructorCallExpression) expression; - ClassNode type = cce.getType(); - if (!packageWhitelist.contains(type.getPackageName())) { - return false; - } - if (!classWhitelist.contains(type.getName())) { - return false; - } - } - return true; - } - - /** - * Returns a customized ASTCustomizer that includes the whitelists and - * expression checker. - */ - public static SecureASTCustomizer getSecureASTCustomizer(Settings settings, Set blacklistAdditions) { - SecureASTCustomizer scz = new SecureASTCustomizer(); - // Closures are allowed - scz.setClosuresAllowed(true); - // But defining methods is not - scz.setMethodDefinitionAllowed(false); - // Only allow the imports that we explicitly call out - List importWhitelist = new ArrayList<>(); - importWhitelist.addAll(ImmutableSet.copyOf(GroovySandboxExpressionChecker.defaultClassConstructionWhitelist)); - scz.setImportsWhitelist(importWhitelist); - // Package definitions are not allowed - scz.setPackageAllowed(false); - // White-listed receivers of method calls - String[] receiverWhitelist = settings.getAsArray(GROOVY_SCRIPT_SANDBOX_RECEIVER_WHITELIST, defaultReceiverWhitelist, true); - scz.setReceiversWhiteList(newArrayList(receiverWhitelist)); - // Add the customized expression checker for finer-grained checking - scz.addExpressionCheckers(new GroovySandboxExpressionChecker(settings, blacklistAdditions)); - return scz; - } -} diff --git a/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java b/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java index 6e0e6aef518..96d63f13cc8 100644 --- a/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java +++ b/src/main/java/org/elasticsearch/script/groovy/GroovyScriptEngineService.java @@ -22,9 +22,6 @@ package org.elasticsearch.script.groovy; import groovy.lang.Binding; import groovy.lang.GroovyClassLoader; import groovy.lang.Script; - -import com.google.common.collect.ImmutableSet; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.Scorer; import org.codehaus.groovy.ast.ClassCodeExpressionTransformer; @@ -40,28 +37,18 @@ import org.codehaus.groovy.control.customizers.CompilationCustomizer; import org.codehaus.groovy.control.customizers.ImportCustomizer; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.script.CompiledScript; -import org.elasticsearch.script.ExecutableScript; -import org.elasticsearch.script.LeafSearchScript; -import org.elasticsearch.script.ScoreAccessor; -import org.elasticsearch.script.ScriptEngineService; -import org.elasticsearch.script.ScriptException; -import 
org.elasticsearch.script.SearchScript; +import org.elasticsearch.script.*; import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SearchLookup; import java.io.IOException; import java.math.BigDecimal; -import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.Map; -import java.util.Set; import java.util.concurrent.atomic.AtomicLong; /** @@ -70,47 +57,17 @@ import java.util.concurrent.atomic.AtomicLong; public class GroovyScriptEngineService extends AbstractComponent implements ScriptEngineService { public static final String NAME = "groovy"; - public static String GROOVY_SCRIPT_SANDBOX_ENABLED = "script.groovy.sandbox.enabled"; - public static String GROOVY_SCRIPT_BLACKLIST_PATCH = "script.groovy.sandbox.method_blacklist_patch"; - private final AtomicLong counter = new AtomicLong(); - private final boolean sandboxed; - private volatile GroovyClassLoader loader; - private volatile Set blacklistAdditions; + private final GroovyClassLoader loader; @Inject public GroovyScriptEngineService(Settings settings) { super(settings); - this.sandboxed = settings.getAsBoolean(GROOVY_SCRIPT_SANDBOX_ENABLED, false); - this.blacklistAdditions = ImmutableSet.copyOf(settings.getAsArray(GROOVY_SCRIPT_BLACKLIST_PATCH, Strings.EMPTY_ARRAY)); - reloadConfig(); - } - - public Set blacklistAdditions() { - return this.blacklistAdditions; - } - - /** - * Appends the additional blacklisted methods to the current blacklist, - * returns true if the black list has changed - */ - public boolean addToBlacklist(String... additions) { - Set newBlackList = new HashSet<>(blacklistAdditions); - Collections.addAll(newBlackList, additions); - boolean changed = this.blacklistAdditions.equals(newBlackList) == false; - this.blacklistAdditions = ImmutableSet.copyOf(newBlackList); - return changed; - } - - public void reloadConfig() { ImportCustomizer imports = new ImportCustomizer(); imports.addStarImports("org.joda.time"); imports.addStaticStars("java.lang.Math"); CompilerConfiguration config = new CompilerConfiguration(); config.addCompilationCustomizers(imports); - if (this.sandboxed) { - config.addCompilationCustomizers(GroovySandboxExpressionChecker.getSecureASTCustomizer(settings, this.blacklistAdditions)); - } // Add BigDecimal -> Double transformer config.addCompilationCustomizers(new GroovyBigDecimalTransformer(CompilePhase.CONVERSION)); this.loader = new GroovyClassLoader(settings.getClassLoader(), config); @@ -148,7 +105,7 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri @Override public boolean sandboxed() { - return this.sandboxed; + return false; } @Override @@ -360,5 +317,4 @@ public class GroovyScriptEngineService extends AbstractComponent implements Scri return super.transform(newExpr); } } - } diff --git a/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java b/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java index ebec396131e..7734d0334bf 100644 --- a/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java +++ b/src/main/java/org/elasticsearch/script/mustache/JsonEscapingMustacheFactory.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.script.mustache; +import com.fasterxml.jackson.core.io.JsonStringEncoder; import com.github.mustachejava.DefaultMustacheFactory; import com.github.mustachejava.MustacheException; @@ -28,40 +29,14 @@ import java.io.Writer; * A MustacheFactory that does simple JSON escaping. 
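// [Editor's sketch] With the sandbox checker deleted, the Groovy compiler setup
// above keeps only the import customizer and the BigDecimal transformer. A
// standalone compilation configured the same way (assuming Groovy on the classpath):
import groovy.lang.GroovyClassLoader;
import groovy.lang.Script;
import org.codehaus.groovy.control.CompilerConfiguration;
import org.codehaus.groovy.control.customizers.ImportCustomizer;

public class GroovyCompileSketch {
    public static void main(String[] args) throws Exception {
        ImportCustomizer imports = new ImportCustomizer();
        imports.addStarImports("org.joda.time");   // same star import as above
        imports.addStaticStars("java.lang.Math");  // makes abs(), max(), ... visible
        CompilerConfiguration config = new CompilerConfiguration();
        config.addCompilationCustomizers(imports);
        try (GroovyClassLoader loader = new GroovyClassLoader(
                GroovyCompileSketch.class.getClassLoader(), config)) {
            // Compiles because the static star import puts Math.abs in scope.
            Class<?> scriptClass = loader.parseClass("abs(-2) + 1");
            Script script = (Script) scriptClass.newInstance();
            System.out.println(script.run()); // 3
        }
    }
}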
*/ public final class JsonEscapingMustacheFactory extends DefaultMustacheFactory { - + @Override public void encode(String value, Writer writer) { try { - escape(value, writer); + JsonStringEncoder utils = new JsonStringEncoder(); + writer.write(utils.quoteAsString(value)); } catch (IOException e) { throw new MustacheException("Failed to encode value: " + value); } } - - public static Writer escape(String value, Writer writer) throws IOException { - for (int i = 0; i < value.length(); i++) { - final char character = value.charAt(i); - if (isEscapeChar(character)) { - writer.write('\\'); - } - writer.write(character); - } - return writer; - } - - public static boolean isEscapeChar(char c) { - switch(c) { - case '\b': - case '\f': - case '\n': - case '\r': - case '"': - case '\\': - case '\u000B': // vertical tab - case '\t': - return true; - } - return false; - } - } diff --git a/src/main/java/org/elasticsearch/search/MultiValueMode.java b/src/main/java/org/elasticsearch/search/MultiValueMode.java index c1f41cb92e0..839d4714dbe 100644 --- a/src/main/java/org/elasticsearch/search/MultiValueMode.java +++ b/src/main/java/org/elasticsearch/search/MultiValueMode.java @@ -27,7 +27,6 @@ import org.apache.lucene.util.Bits; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.index.fielddata.FieldData; import org.elasticsearch.index.fielddata.NumericDoubleValues; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; @@ -375,13 +374,13 @@ public enum MultiValueMode { /** * A case insensitive version of {@link #valueOf(String)} * - * @throws org.elasticsearch.ElasticsearchIllegalArgumentException if the given string doesn't match a sort mode or is null. + * @throws IllegalArgumentException if the given string doesn't match a sort mode or is null. 
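// [Editor's sketch, referring to the mustache change above] The hand-rolled
// escape()/isEscapeChar() pair is replaced by Jackson's JsonStringEncoder, which
// also covers control characters the old switch missed. Its behaviour, roughly:
import com.fasterxml.jackson.core.io.JsonStringEncoder;

public class JsonEscapeDemo {
    public static void main(String[] args) {
        String raw = "line1\nhe said \"hi\"\tend";
        char[] escaped = JsonStringEncoder.getInstance().quoteAsString(raw);
        System.out.println(new String(escaped)); // line1\nhe said \"hi\"\tend
    }
}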
*/ public static MultiValueMode fromString(String sortMode) { try { return valueOf(sortMode.toUpperCase(Locale.ROOT)); } catch (Throwable t) { - throw new ElasticsearchIllegalArgumentException("Illegal sort_mode " + sortMode); + throw new IllegalArgumentException("Illegal sort_mode " + sortMode); } } @@ -615,7 +614,7 @@ public enum MultiValueMode { } protected BytesRef pick(SortedBinaryDocValues values, BytesRef missingValue) { - throw new ElasticsearchIllegalArgumentException("Unsupported sort mode: " + this); + throw new IllegalArgumentException("Unsupported sort mode: " + this); } /** @@ -735,7 +734,7 @@ public enum MultiValueMode { } protected int pick(RandomAccessOrds values) { - throw new ElasticsearchIllegalArgumentException("Unsupported sort mode: " + this); + throw new IllegalArgumentException("Unsupported sort mode: " + this); } /** diff --git a/src/main/java/org/elasticsearch/search/SearchContextException.java b/src/main/java/org/elasticsearch/search/SearchContextException.java index 39bea64188f..599515830e1 100644 --- a/src/main/java/org/elasticsearch/search/SearchContextException.java +++ b/src/main/java/org/elasticsearch/search/SearchContextException.java @@ -35,19 +35,6 @@ public class SearchContextException extends SearchException { } private static String buildMessage(SearchContext context, String msg) { - StringBuilder sb = new StringBuilder(); - sb.append('[').append(context.shardTarget().index()).append("][").append(context.shardTarget().shardId()).append("]: "); - if (context.parsedQuery() != null) { - try { - sb.append("query[").append(context.parsedQuery().query()).append("],"); - } catch (Exception e) { - sb.append("query[_failed_to_string_],"); - } - } - sb.append("from[").append(context.from()).append("],size[").append(context.size()).append("]"); - if (context.sort() != null) { - sb.append(",sort[").append(context.sort()).append("]"); - } - return sb.append(": ").append(msg).toString(); + return msg; } } diff --git a/src/main/java/org/elasticsearch/search/SearchParseException.java b/src/main/java/org/elasticsearch/search/SearchParseException.java index 5f528f2d57a..15c6bfd6f90 100644 --- a/src/main/java/org/elasticsearch/search/SearchParseException.java +++ b/src/main/java/org/elasticsearch/search/SearchParseException.java @@ -19,24 +19,64 @@ package org.elasticsearch.search; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.internal.SearchContext; +import java.io.IOException; + /** * */ public class SearchParseException extends SearchContextException { - public SearchParseException(SearchContext context, String msg) { - super(context, "Parse Failure [" + msg + "]"); + public static final int UNKNOWN_POSITION = -1; + private int lineNumber = UNKNOWN_POSITION; + private int columnNumber = UNKNOWN_POSITION; + + public SearchParseException(SearchContext context, String msg, @Nullable XContentLocation location) { + this(context, msg, location, null); } - public SearchParseException(SearchContext context, String msg, Throwable cause) { - super(context, "Parse Failure [" + msg + "]", cause); + public SearchParseException(SearchContext context, String msg, @Nullable XContentLocation location, Throwable cause) { + super(context, msg, cause); + if (location != null) { + lineNumber = location.lineNumber; + columnNumber = location.columnNumber; + } } @Override public RestStatus status() { 
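// [Editor's sketch] fromString() above is the standard case-insensitive valueOf()
// wrapper. The same pattern in isolation (SortMode is a stand-in enum, not the real
// MultiValueMode):
import java.util.Locale;

public class EnumFromString {
    enum SortMode { MIN, MAX, SUM, AVG, MEDIAN }

    static SortMode fromString(String sortMode) {
        try {
            return SortMode.valueOf(sortMode.toUpperCase(Locale.ROOT));
        } catch (Throwable t) {
            // Folds both the null case (NullPointerException) and an unknown name
            // (IllegalArgumentException) into one message, as above.
            throw new IllegalArgumentException("Illegal sort_mode " + sortMode);
        }
    }

    public static void main(String[] args) {
        System.out.println(fromString("avg")); // AVG
    }
}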
return RestStatus.BAD_REQUEST; } + + @Override + protected void innerToXContent(XContentBuilder builder, Params params) throws IOException { + if (lineNumber != UNKNOWN_POSITION) { + builder.field("line", lineNumber); + builder.field("col", columnNumber); + } + super.innerToXContent(builder, params); + } + + /** + * Line number of the location of the error + * + * @return the line number or -1 if unknown + */ + public int getLineNumber() { + return lineNumber; + } + + /** + * Column number of the location of the error + * + * @return the column number or -1 if unknown + */ + public int getColumnNumber() { + return columnNumber; + } } diff --git a/src/main/java/org/elasticsearch/search/SearchPhase.java b/src/main/java/org/elasticsearch/search/SearchPhase.java index 30013b5471c..bdab128fecb 100644 --- a/src/main/java/org/elasticsearch/search/SearchPhase.java +++ b/src/main/java/org/elasticsearch/search/SearchPhase.java @@ -36,5 +36,5 @@ public interface SearchPhase { */ void preProcess(SearchContext context); - void execute(SearchContext context) throws ElasticsearchException; + void execute(SearchContext context); } diff --git a/src/main/java/org/elasticsearch/search/SearchService.java b/src/main/java/org/elasticsearch/search/SearchService.java index 64762395811..057b92a5912 100644 --- a/src/main/java/org/elasticsearch/search/SearchService.java +++ b/src/main/java/org/elasticsearch/search/SearchService.java @@ -24,12 +24,12 @@ import com.carrotsearch.hppc.ObjectSet; import com.carrotsearch.hppc.cursors.ObjectCursor; import com.google.common.base.Charsets; import com.google.common.collect.ImmutableMap; + import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.search.TopDocs; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.search.SearchType; @@ -52,6 +52,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.cache.IndexCache; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -71,17 +72,30 @@ import org.elasticsearch.indices.IndicesWarmer.TerminationHandle; import org.elasticsearch.indices.IndicesWarmer.WarmerContext; import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.script.ExecutableScript; +import org.elasticsearch.script.Script; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptService; -import org.elasticsearch.script.ScriptContextRegistry; import org.elasticsearch.script.mustache.MustacheScriptEngineService; import org.elasticsearch.search.dfs.CachedDfSource; import org.elasticsearch.search.dfs.DfsPhase; import org.elasticsearch.search.dfs.DfsSearchResult; -import org.elasticsearch.search.fetch.*; -import org.elasticsearch.search.internal.*; +import org.elasticsearch.search.fetch.FetchPhase; +import org.elasticsearch.search.fetch.FetchSearchResult; +import org.elasticsearch.search.fetch.QueryFetchSearchResult; +import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult; +import 
org.elasticsearch.search.fetch.ShardFetchRequest; +import org.elasticsearch.search.internal.DefaultSearchContext; +import org.elasticsearch.search.internal.InternalScrollSearchRequest; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.internal.SearchContext.Lifetime; -import org.elasticsearch.search.query.*; +import org.elasticsearch.search.internal.ShardSearchLocalRequest; +import org.elasticsearch.search.internal.ShardSearchRequest; +import org.elasticsearch.search.query.QueryPhase; +import org.elasticsearch.search.query.QueryPhaseExecutionException; +import org.elasticsearch.search.query.QuerySearchRequest; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.search.query.QuerySearchResultProvider; +import org.elasticsearch.search.query.ScrollQuerySearchResult; import org.elasticsearch.search.warmer.IndexWarmersMetaData; import org.elasticsearch.threadpool.ThreadPool; @@ -193,23 +207,23 @@ public class SearchService extends AbstractLifecycleComponent { } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { for (final SearchContext context : activeContexts.values()) { freeContext(context.id()); } } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { doStop(); FutureUtils.cancel(keepAliveReaper); } - public DfsSearchResult executeDfsPhase(ShardSearchRequest request) throws ElasticsearchException { + public DfsSearchResult executeDfsPhase(ShardSearchRequest request) { final SearchContext context = createAndPutContext(request); try { contextProcessing(context); @@ -225,12 +239,12 @@ public class SearchService extends AbstractLifecycleComponent { } } - public QuerySearchResult executeScan(ShardSearchRequest request) throws ElasticsearchException { + public QuerySearchResult executeScan(ShardSearchRequest request) { final SearchContext context = createAndPutContext(request); final int originalSize = context.size(); try { if (context.aggregations() != null) { - throw new ElasticsearchIllegalArgumentException("aggregations are not supported with search_type=scan"); + throw new IllegalArgumentException("aggregations are not supported with search_type=scan"); } if (context.scroll() == null) { @@ -256,7 +270,7 @@ public class SearchService extends AbstractLifecycleComponent { } } - public ScrollQueryFetchSearchResult executeScan(InternalScrollSearchRequest request) throws ElasticsearchException { + public ScrollQueryFetchSearchResult executeScan(InternalScrollSearchRequest request) { final SearchContext context = findContext(request.id()); contextProcessing(context); try { @@ -297,7 +311,7 @@ public class SearchService extends AbstractLifecycleComponent { } } - public QuerySearchResultProvider executeQueryPhase(ShardSearchRequest request) throws ElasticsearchException { + public QuerySearchResultProvider executeQueryPhase(ShardSearchRequest request) { final SearchContext context = createAndPutContext(request); try { context.indexShard().searchService().onPreQueryPhase(context); @@ -328,7 +342,7 @@ public class SearchService extends AbstractLifecycleComponent { } } - public ScrollQuerySearchResult executeQueryPhase(InternalScrollSearchRequest request) throws ElasticsearchException { + public ScrollQuerySearchResult executeQueryPhase(InternalScrollSearchRequest request) { final SearchContext context = findContext(request.id()); try { 
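// [Editor's note] The signature sweep above only deletes "throws
// ElasticsearchException" clauses. ElasticsearchException extends RuntimeException
// in this codebase, so the clause carried no compile-time meaning and callers are
// unaffected; a minimal demonstration with a stand-in unchecked exception:
public class UncheckedThrowsDemo {
    static class AppException extends RuntimeException {
        AppException(String message) { super(message); }
    }

    // Declared or not, both versions compile and behave identically for callers.
    static void declared() throws AppException { throw new AppException("boom"); }
    static void undeclared() { throw new AppException("boom"); }

    public static void main(String[] args) {
        try {
            undeclared();
        } catch (AppException e) {
            System.out.println(e.getMessage()); // boom
        }
    }
}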
context.indexShard().searchService().onPreQueryPhase(context); @@ -349,11 +363,13 @@ public class SearchService extends AbstractLifecycleComponent { } } - public QuerySearchResult executeQueryPhase(QuerySearchRequest request) throws ElasticsearchException { + public QuerySearchResult executeQueryPhase(QuerySearchRequest request) { final SearchContext context = findContext(request.id()); contextProcessing(context); try { - context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity())); + final IndexCache indexCache = context.indexShard().indexService().cache(); + context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity(), + indexCache.filter(), indexCache.filterPolicy())); } catch (Throwable e) { freeContext(context.id()); cleanContext(context); @@ -381,7 +397,7 @@ public class SearchService extends AbstractLifecycleComponent { } } - public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request) throws ElasticsearchException { + public QueryFetchSearchResult executeFetchPhase(ShardSearchRequest request) { final SearchContext context = createAndPutContext(request); contextProcessing(context); try { @@ -419,11 +435,13 @@ public class SearchService extends AbstractLifecycleComponent { } } - public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request) throws ElasticsearchException { + public QueryFetchSearchResult executeFetchPhase(QuerySearchRequest request) { final SearchContext context = findContext(request.id()); contextProcessing(context); try { - context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity())); + final IndexCache indexCache = context.indexShard().indexService().cache(); + context.searcher().dfSource(new CachedDfSource(context.searcher().getIndexReader(), request.dfs(), context.similarityService().similarity(), + indexCache.filter(), indexCache.filterPolicy())); } catch (Throwable e) { freeContext(context.id()); cleanContext(context); @@ -464,7 +482,7 @@ public class SearchService extends AbstractLifecycleComponent { } } - public ScrollQueryFetchSearchResult executeFetchPhase(InternalScrollSearchRequest request) throws ElasticsearchException { + public ScrollQueryFetchSearchResult executeFetchPhase(InternalScrollSearchRequest request) { final SearchContext context = findContext(request.id()); contextProcessing(context); try { @@ -503,7 +521,7 @@ public class SearchService extends AbstractLifecycleComponent { } } - public FetchSearchResult executeFetchPhase(ShardFetchRequest request) throws ElasticsearchException { + public FetchSearchResult executeFetchPhase(ShardFetchRequest request) { final SearchContext context = findContext(request.id()); contextProcessing(context); try { @@ -540,7 +558,7 @@ public class SearchService extends AbstractLifecycleComponent { return context; } - final SearchContext createAndPutContext(ShardSearchRequest request) throws ElasticsearchException { + final SearchContext createAndPutContext(ShardSearchRequest request) { SearchContext context = createContext(request, null); boolean success = false; try { @@ -555,7 +573,7 @@ public class SearchService extends AbstractLifecycleComponent { } } - final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws ElasticsearchException { + final SearchContext createContext(ShardSearchRequest request, 
@Nullable Engine.Searcher searcher) { IndexService indexService = indicesService.indexServiceSafe(request.index()); IndexShard indexShard = indexService.shardSafe(request.shardId()); @@ -653,7 +671,7 @@ public class SearchService extends AbstractLifecycleComponent { final ExecutableScript executable; if (hasLength(request.templateName())) { - executable = this.scriptService.executable(MustacheScriptEngineService.NAME, request.templateName(), request.templateType(), ScriptContext.Standard.SEARCH, request.templateParams()); + executable = this.scriptService.executable(new Script(MustacheScriptEngineService.NAME, request.templateName(), request.templateType(), request.templateParams()), ScriptContext.Standard.SEARCH); } else { if (!hasLength(request.templateSource())) { return; @@ -693,7 +711,7 @@ public class SearchService extends AbstractLifecycleComponent { if (!hasLength(templateContext.template())) { throw new ElasticsearchParseException("Template must have [template] field configured"); } - executable = this.scriptService.executable(MustacheScriptEngineService.NAME, templateContext.template(), templateContext.scriptType(), ScriptContext.Standard.SEARCH, templateContext.params()); + executable = this.scriptService.executable(new Script(MustacheScriptEngineService.NAME, templateContext.template(), templateContext.scriptType(), templateContext.params()), ScriptContext.Standard.SEARCH); } BytesReference processedQuery = (BytesReference) executable.run(); @@ -719,7 +737,7 @@ public class SearchService extends AbstractLifecycleComponent { parser.nextToken(); SearchParseElement element = elementParsers.get(fieldName); if (element == null) { - throw new SearchParseException(context, "No parser for element [" + fieldName + "]"); + throw new SearchParseException(context, "No parser for element [" + fieldName + "]", parser.getTokenLocation()); } element.parse(parser, context); } else { @@ -737,7 +755,7 @@ public class SearchService extends AbstractLifecycleComponent { } catch (Throwable e1) { // ignore } - throw new SearchParseException(context, "Failed to parse source [" + sSource + "]", e); + throw new SearchParseException(context, "Failed to parse source [" + sSource + "]", parser.getTokenLocation(), e); } finally { if (parser != null) { parser.close(); diff --git a/src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java b/src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java index 13fe4bd15fb..5730a023554 100644 --- a/src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java +++ b/src/main/java/org/elasticsearch/search/action/SearchServiceTransportAction.java @@ -76,20 +76,20 @@ public class SearchServiceTransportAction extends AbstractComponent { this.transportService = transportService; this.searchService = searchService; - transportService.registerHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, new ScrollFreeContextTransportHandler()); - transportService.registerHandler(FREE_CONTEXT_ACTION_NAME, new SearchFreeContextTransportHandler()); - transportService.registerHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, new ClearScrollContextsTransportHandler()); - transportService.registerHandler(DFS_ACTION_NAME, new SearchDfsTransportHandler()); - transportService.registerHandler(QUERY_ACTION_NAME, new SearchQueryTransportHandler()); - transportService.registerHandler(QUERY_ID_ACTION_NAME, new SearchQueryByIdTransportHandler()); - transportService.registerHandler(QUERY_SCROLL_ACTION_NAME, new SearchQueryScrollTransportHandler()); - 
transportService.registerHandler(QUERY_FETCH_ACTION_NAME, new SearchQueryFetchTransportHandler()); - transportService.registerHandler(QUERY_QUERY_FETCH_ACTION_NAME, new SearchQueryQueryFetchTransportHandler()); - transportService.registerHandler(QUERY_FETCH_SCROLL_ACTION_NAME, new SearchQueryFetchScrollTransportHandler()); - transportService.registerHandler(FETCH_ID_SCROLL_ACTION_NAME, new ScrollFetchByIdTransportHandler()); - transportService.registerHandler(FETCH_ID_ACTION_NAME, new SearchFetchByIdTransportHandler()); - transportService.registerHandler(SCAN_ACTION_NAME, new SearchScanTransportHandler()); - transportService.registerHandler(SCAN_SCROLL_ACTION_NAME, new SearchScanScrollTransportHandler()); + transportService.registerRequestHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, ScrollFreeContextRequest.class, ThreadPool.Names.SAME, new FreeContextTransportHandler<>()); + transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, SearchFreeContextRequest.class, ThreadPool.Names.SAME, new FreeContextTransportHandler()); + transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, ClearScrollContextsRequest.class, ThreadPool.Names.SAME, new ClearScrollContextsTransportHandler()); + transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest.class, ThreadPool.Names.SEARCH, new SearchDfsTransportHandler()); + transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest.class, ThreadPool.Names.SEARCH, new SearchQueryTransportHandler()); + transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, QuerySearchRequest.class, ThreadPool.Names.SEARCH, new SearchQueryByIdTransportHandler()); + transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, InternalScrollSearchRequest.class, ThreadPool.Names.SEARCH, new SearchQueryScrollTransportHandler()); + transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest.class, ThreadPool.Names.SEARCH, new SearchQueryFetchTransportHandler()); + transportService.registerRequestHandler(QUERY_QUERY_FETCH_ACTION_NAME, QuerySearchRequest.class, ThreadPool.Names.SEARCH, new SearchQueryQueryFetchTransportHandler()); + transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest.class, ThreadPool.Names.SEARCH, new SearchQueryFetchScrollTransportHandler()); + transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ShardFetchRequest.class, ThreadPool.Names.SEARCH, new FetchByIdTransportHandler<>()); + transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ShardFetchSearchRequest.class, ThreadPool.Names.SEARCH, new FetchByIdTransportHandler()); + transportService.registerRequestHandler(SCAN_ACTION_NAME, ShardSearchTransportRequest.class, ThreadPool.Names.SEARCH, new SearchScanTransportHandler()); + transportService.registerRequestHandler(SCAN_SCROLL_ACTION_NAME, InternalScrollSearchRequest.class, ThreadPool.Names.SEARCH, new SearchScanScrollTransportHandler()); } public void sendFreeContext(DiscoveryNode node, final long contextId, SearchRequest request) { @@ -327,36 +327,12 @@ public class SearchServiceTransportAction extends AbstractComponent { } } - private abstract class BaseFreeContextTransportHandler extends BaseTransportRequestHandler { - @Override - public abstract FreeContextRequest newInstance(); - + class FreeContextTransportHandler implements TransportRequestHandler { @Override public void messageReceived(FreeContextRequest request, TransportChannel channel) throws Exception { boolean 
freed = searchService.freeContext(request.id()); channel.sendResponse(new SearchFreeContextResponse(freed)); } - - @Override - public String executor() { - // freeing the context is cheap, - // no need for fork it to another thread - return ThreadPool.Names.SAME; - } - } - - class ScrollFreeContextTransportHandler extends BaseFreeContextTransportHandler { - @Override - public ScrollFreeContextRequest newInstance() { - return new ScrollFreeContextRequest(); - } - } - - class SearchFreeContextTransportHandler extends BaseFreeContextTransportHandler { - @Override - public SearchFreeContextRequest newInstance() { - return new SearchFreeContextRequest(); - } } static class ClearScrollContextsRequest extends TransportRequest { @@ -370,226 +346,91 @@ public class SearchServiceTransportAction extends AbstractComponent { } - class ClearScrollContextsTransportHandler extends BaseTransportRequestHandler { - - @Override - public ClearScrollContextsRequest newInstance() { - return new ClearScrollContextsRequest(); - } - + class ClearScrollContextsTransportHandler implements TransportRequestHandler { @Override public void messageReceived(ClearScrollContextsRequest request, TransportChannel channel) throws Exception { searchService.freeAllScrollContexts(); channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - // freeing the context is cheap, - // no need for fork it to another thread - return ThreadPool.Names.SAME; - } } - private class SearchDfsTransportHandler extends BaseTransportRequestHandler { - - @Override - public ShardSearchTransportRequest newInstance() { - return new ShardSearchTransportRequest(); - } - + class SearchDfsTransportHandler implements TransportRequestHandler { @Override public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception { DfsSearchResult result = searchService.executeDfsPhase(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private class SearchQueryTransportHandler extends BaseTransportRequestHandler { - - @Override - public ShardSearchTransportRequest newInstance() { - return new ShardSearchTransportRequest(); - } - + class SearchQueryTransportHandler implements TransportRequestHandler { @Override public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception { QuerySearchResultProvider result = searchService.executeQueryPhase(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private class SearchQueryByIdTransportHandler extends BaseTransportRequestHandler { - - @Override - public QuerySearchRequest newInstance() { - return new QuerySearchRequest(); - } - + class SearchQueryByIdTransportHandler implements TransportRequestHandler { @Override public void messageReceived(QuerySearchRequest request, TransportChannel channel) throws Exception { QuerySearchResult result = searchService.executeQueryPhase(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private class SearchQueryScrollTransportHandler extends BaseTransportRequestHandler { - - @Override - public InternalScrollSearchRequest newInstance() { - return new InternalScrollSearchRequest(); - } - + class SearchQueryScrollTransportHandler implements TransportRequestHandler { @Override public void messageReceived(InternalScrollSearchRequest request, 
TransportChannel channel) throws Exception { ScrollQuerySearchResult result = searchService.executeQueryPhase(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private class SearchQueryFetchTransportHandler extends BaseTransportRequestHandler { - - @Override - public ShardSearchTransportRequest newInstance() { - return new ShardSearchTransportRequest(); - } - + class SearchQueryFetchTransportHandler implements TransportRequestHandler { @Override public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception { QueryFetchSearchResult result = searchService.executeFetchPhase(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private class SearchQueryQueryFetchTransportHandler extends BaseTransportRequestHandler { - - @Override - public QuerySearchRequest newInstance() { - return new QuerySearchRequest(); - } - + class SearchQueryQueryFetchTransportHandler implements TransportRequestHandler { @Override public void messageReceived(QuerySearchRequest request, TransportChannel channel) throws Exception { QueryFetchSearchResult result = searchService.executeFetchPhase(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private abstract class FetchByIdTransportHandler extends BaseTransportRequestHandler { - - @Override - public abstract Request newInstance(); - + class FetchByIdTransportHandler implements TransportRequestHandler { @Override public void messageReceived(Request request, TransportChannel channel) throws Exception { FetchSearchResult result = searchService.executeFetchPhase(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private class ScrollFetchByIdTransportHandler extends FetchByIdTransportHandler { - @Override - public ShardFetchRequest newInstance() { - return new ShardFetchRequest(); - } - } - - private class SearchFetchByIdTransportHandler extends FetchByIdTransportHandler { - @Override - public ShardFetchSearchRequest newInstance() { - return new ShardFetchSearchRequest(); - } - } - - private class SearchQueryFetchScrollTransportHandler extends BaseTransportRequestHandler { - - @Override - public InternalScrollSearchRequest newInstance() { - return new InternalScrollSearchRequest(); - } - + class SearchQueryFetchScrollTransportHandler implements TransportRequestHandler { @Override public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception { ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private class SearchScanTransportHandler extends BaseTransportRequestHandler { - - @Override - public ShardSearchTransportRequest newInstance() { - return new ShardSearchTransportRequest(); - } - + class SearchScanTransportHandler implements TransportRequestHandler { @Override public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception { QuerySearchResult result = searchService.executeScan(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } - private class SearchScanScrollTransportHandler extends BaseTransportRequestHandler { - - 
@Override - public InternalScrollSearchRequest newInstance() { - return new InternalScrollSearchRequest(); - } - + class SearchScanScrollTransportHandler implements TransportRequestHandler { @Override public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception { ScrollQueryFetchSearchResult result = searchService.executeScan(request); channel.sendResponse(result); } - - @Override - public String executor() { - return ThreadPool.Names.SEARCH; - } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java index 5b9fab55aa4..d41daa7363f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationBuilder.java @@ -20,6 +20,7 @@ package org.elasticsearch.search.aggregations; import com.google.common.collect.Lists; + import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.client.Requests; import org.elasticsearch.common.bytes.BytesArray; diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java index 607757fb682..2f6e929071f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationModule.java @@ -56,6 +56,12 @@ import org.elasticsearch.search.aggregations.metrics.stats.extended.ExtendedStat import org.elasticsearch.search.aggregations.metrics.sum.SumParser; import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsParser; import org.elasticsearch.search.aggregations.metrics.valuecount.ValueCountParser; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.max.MaxBucketParser; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.min.MinBucketParser; +import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeParser; +import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgParser; +import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelModule; import java.util.List; @@ -64,40 +70,46 @@ import java.util.List; */ public class AggregationModule extends AbstractModule implements SpawnModules{ - private List> parsers = Lists.newArrayList(); + private List> aggParsers = Lists.newArrayList(); + private List> reducerParsers = Lists.newArrayList(); public AggregationModule() { - parsers.add(AvgParser.class); - parsers.add(SumParser.class); - parsers.add(MinParser.class); - parsers.add(MaxParser.class); - parsers.add(StatsParser.class); - parsers.add(ExtendedStatsParser.class); - parsers.add(ValueCountParser.class); - parsers.add(PercentilesParser.class); - parsers.add(PercentileRanksParser.class); - parsers.add(CardinalityParser.class); + aggParsers.add(AvgParser.class); + aggParsers.add(SumParser.class); + aggParsers.add(MinParser.class); + aggParsers.add(MaxParser.class); + aggParsers.add(StatsParser.class); + aggParsers.add(ExtendedStatsParser.class); + aggParsers.add(ValueCountParser.class); + aggParsers.add(PercentilesParser.class); + aggParsers.add(PercentileRanksParser.class); + aggParsers.add(CardinalityParser.class); - parsers.add(GlobalParser.class); - parsers.add(MissingParser.class); - parsers.add(FilterParser.class); - parsers.add(FiltersParser.class); - 
parsers.add(SamplerParser.class); - parsers.add(TermsParser.class); - parsers.add(SignificantTermsParser.class); - parsers.add(RangeParser.class); - parsers.add(DateRangeParser.class); - parsers.add(IpRangeParser.class); - parsers.add(HistogramParser.class); - parsers.add(DateHistogramParser.class); - parsers.add(GeoDistanceParser.class); - parsers.add(GeoHashGridParser.class); - parsers.add(NestedParser.class); - parsers.add(ReverseNestedParser.class); - parsers.add(TopHitsParser.class); - parsers.add(GeoBoundsParser.class); - parsers.add(ScriptedMetricParser.class); - parsers.add(ChildrenParser.class); + aggParsers.add(GlobalParser.class); + aggParsers.add(MissingParser.class); + aggParsers.add(FilterParser.class); + aggParsers.add(FiltersParser.class); + aggParsers.add(SamplerParser.class); + aggParsers.add(TermsParser.class); + aggParsers.add(SignificantTermsParser.class); + aggParsers.add(RangeParser.class); + aggParsers.add(DateRangeParser.class); + aggParsers.add(IpRangeParser.class); + aggParsers.add(HistogramParser.class); + aggParsers.add(DateHistogramParser.class); + aggParsers.add(GeoDistanceParser.class); + aggParsers.add(GeoHashGridParser.class); + aggParsers.add(NestedParser.class); + aggParsers.add(ReverseNestedParser.class); + aggParsers.add(TopHitsParser.class); + aggParsers.add(GeoBoundsParser.class); + aggParsers.add(ScriptedMetricParser.class); + aggParsers.add(ChildrenParser.class); + + reducerParsers.add(DerivativeParser.class); + reducerParsers.add(MaxBucketParser.class); + reducerParsers.add(MinBucketParser.class); + reducerParsers.add(MovAvgParser.class); } /** @@ -106,14 +118,18 @@ public class AggregationModule extends AbstractModule implements SpawnModules{ * @param parser The parser for the custom aggregator. */ public void addAggregatorParser(Class parser) { - parsers.add(parser); + aggParsers.add(parser); } @Override protected void configure() { - Multibinder multibinder = Multibinder.newSetBinder(binder(), Aggregator.Parser.class); - for (Class parser : parsers) { - multibinder.addBinding().to(parser); + Multibinder multibinderAggParser = Multibinder.newSetBinder(binder(), Aggregator.Parser.class); + for (Class parser : aggParsers) { + multibinderAggParser.addBinding().to(parser); + } + Multibinder multibinderReducerParser = Multibinder.newSetBinder(binder(), Reducer.Parser.class); + for (Class parser : reducerParsers) { + multibinderReducerParser.addBinding().to(parser); } bind(AggregatorParsers.class).asEagerSingleton(); bind(AggregationParseElement.class).asEagerSingleton(); @@ -122,7 +138,7 @@ public class AggregationModule extends AbstractModule implements SpawnModules{ @Override public Iterable spawnModules() { - return ImmutableList.of(new SignificantTermsHeuristicModule()); + return ImmutableList.of(new SignificantTermsHeuristicModule(), new MovAvgModelModule()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java b/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java index 118b37e386d..717834b045c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java @@ -20,8 +20,8 @@ package org.elasticsearch.search.aggregations; import com.google.common.collect.ImmutableMap; -import org.apache.lucene.search.Filter; -import org.apache.lucene.search.FilteredQuery; +import org.apache.lucene.search.BooleanClause.Occur; +import org.apache.lucene.search.BooleanQuery; import 
org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.inject.Inject; @@ -29,6 +29,8 @@ import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.SearchPhase; import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.SiblingReducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.query.QueryPhaseExecutionException; @@ -74,7 +76,8 @@ public class AggregationPhase implements SearchPhase { List collectors = new ArrayList<>(); Aggregator[] aggregators; try { - aggregators = context.aggregations().factories().createTopLevelAggregators(aggregationContext); + AggregatorFactories factories = context.aggregations().factories(); + aggregators = factories.createTopLevelAggregators(aggregationContext); for (int i = 0; i < aggregators.length; i++) { if (aggregators[i] instanceof GlobalAggregator == false) { collectors.add(aggregators[i]); @@ -93,7 +96,7 @@ public class AggregationPhase implements SearchPhase { } @Override - public void execute(SearchContext context) throws ElasticsearchException { + public void execute(SearchContext context) { if (context.aggregations() == null) { context.queryResult().aggregations(null); return; @@ -116,9 +119,12 @@ public class AggregationPhase implements SearchPhase { if (!globals.isEmpty()) { BucketCollector globalsCollector = BucketCollector.wrap(globals); Query query = Queries.newMatchAllQuery(); - Filter searchFilter = context.searchFilter(context.types()); + Query searchFilter = context.searchFilter(context.types()); if (searchFilter != null) { - query = new FilteredQuery(query, searchFilter); + BooleanQuery filtered = new BooleanQuery(); + filtered.add(query, Occur.MUST); + filtered.add(searchFilter, Occur.FILTER); + query = filtered; } try { globalsCollector.preCollection(); @@ -138,6 +144,21 @@ public class AggregationPhase implements SearchPhase { } } context.queryResult().aggregations(new InternalAggregations(aggregations)); + try { + List reducers = context.aggregations().factories().createReducers(); + List siblingReducers = new ArrayList<>(reducers.size()); + for (Reducer reducer : reducers) { + if (reducer instanceof SiblingReducer) { + siblingReducers.add((SiblingReducer) reducer); + } else { + throw new AggregationExecutionException("Invalid reducer named [" + reducer.name() + "] of type [" + + reducer.type().name() + "]. 
Only sibling reducers are allowed at the top level"); + } + } + context.queryResult().reducers(siblingReducers); + } catch (IOException e) { + throw new AggregationExecutionException("Failed to build top level reducers", e); + } // disable aggregations so that they don't run on next pages in case of scrolling context.aggregations(null); diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java index 9a2fa3a8a57..bb1e18be09d 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorBase.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.aggregations; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.search.aggregations.bucket.BestBucketsDeferringCollector; import org.elasticsearch.search.aggregations.bucket.DeferringBucketCollector; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.internal.SearchContext.Lifetime; import org.elasticsearch.search.query.QueryPhaseExecutionException; @@ -46,6 +47,7 @@ public abstract class AggregatorBase extends Aggregator { private Map subAggregatorbyName; private DeferringBucketCollector recordingWrapper; + private final List reducers; /** * Constructs a new Aggregator. @@ -56,8 +58,10 @@ public abstract class AggregatorBase extends Aggregator { * @param parent The parent aggregator (may be {@code null} for top level aggregators) * @param metaData The metaData associated with this aggregator */ - protected AggregatorBase(String name, AggregatorFactories factories, AggregationContext context, Aggregator parent, Map metaData) throws IOException { + protected AggregatorBase(String name, AggregatorFactories factories, AggregationContext context, Aggregator parent, + List reducers, Map metaData) throws IOException { this.name = name; + this.reducers = reducers; this.metaData = metaData; this.parent = parent; this.context = context; @@ -112,6 +116,10 @@ public abstract class AggregatorBase extends Aggregator { return this.metaData; } + public List reducers() { + return this.reducers; + } + /** * Get a {@link LeafBucketCollector} for the given ctx, which should * delegate to the given collector. 
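// [Editor's sketch, referring to the FilteredQuery removal in AggregationPhase
// above] In this Lucene version a query is filtered by adding a FILTER clause,
// which must match but does not contribute to scoring:
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.MatchAllDocsQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

public class FilteredQuerySketch {
    public static void main(String[] args) {
        Query query = new MatchAllDocsQuery();
        Query searchFilter = new TermQuery(new Term("_type", "product")); // stand-in filter
        BooleanQuery filtered = new BooleanQuery(); // mutable API, as in this Lucene version
        filtered.add(query, Occur.MUST);
        filtered.add(searchFilter, Occur.FILTER);   // matches, but is not scored
        System.out.println(filtered); // +*:* #_type:product
    }
}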
diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 98cc7e39e1a..ba464531b8b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -18,13 +18,18 @@ */ package org.elasticsearch.search.aggregations; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.aggregations.support.AggregationContext; +import org.elasticsearch.search.aggregations.support.AggregationPath; import java.io.IOException; import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedList; import java.util.List; +import java.util.Map; import java.util.Set; /** @@ -34,18 +39,30 @@ public class AggregatorFactories { public static final AggregatorFactories EMPTY = new Empty(); + private AggregatorFactory parent; private AggregatorFactory[] factories; + private List reducerFactories; public static Builder builder() { return new Builder(); } - private AggregatorFactories(AggregatorFactory[] factories) { + private AggregatorFactories(AggregatorFactory[] factories, List reducers) { this.factories = factories; + this.reducerFactories = reducers; + } + + public List createReducers() throws IOException { + List reducers = new ArrayList<>(); + for (ReducerFactory factory : this.reducerFactories) { + reducers.add(factory.create()); + } + return reducers; } /** - * Create all aggregators so that they can be consumed with multiple buckets. + * Create all aggregators so that they can be consumed with multiple + * buckets. 
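// [Editor's sketch] createReducers() above is a plain factory sweep: each registered
// factory contributes one fresh Reducer instance per call. The shape of that
// pattern with stand-in types (not the real ES classes):
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class FactorySweep {
    interface Reducer {}
    interface ReducerFactory { Reducer create(); }

    static List<Reducer> createReducers(List<ReducerFactory> factories) {
        List<Reducer> reducers = new ArrayList<>();
        for (ReducerFactory factory : factories) {
            reducers.add(factory.create()); // one new instance per invocation
        }
        return reducers;
    }

    public static void main(String[] args) {
        ReducerFactory factory = new ReducerFactory() {
            @Override public Reducer create() { return new Reducer() {}; }
        };
        System.out.println(createReducers(Arrays.asList(factory, factory)).size()); // 2
    }
}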
*/ public Aggregator[] createSubAggregators(Aggregator parent) throws IOException { Aggregator[] aggregators = new Aggregator[count()]; @@ -76,6 +93,7 @@ public class AggregatorFactories { } void setParent(AggregatorFactory parent) { + this.parent = parent; for (AggregatorFactory factory : factories) { factory.parent = parent; } @@ -85,15 +103,19 @@ public class AggregatorFactories { for (AggregatorFactory factory : factories) { factory.validate(); } + for (ReducerFactory factory : reducerFactories) { + factory.validate(parent, factories, reducerFactories); + } } private final static class Empty extends AggregatorFactories { private static final AggregatorFactory[] EMPTY_FACTORIES = new AggregatorFactory[0]; private static final Aggregator[] EMPTY_AGGREGATORS = new Aggregator[0]; + private static final List EMPTY_REDUCERS = new ArrayList<>(); private Empty() { - super(EMPTY_FACTORIES); + super(EMPTY_FACTORIES, EMPTY_REDUCERS); } @Override @@ -112,20 +134,75 @@ public class AggregatorFactories { private final Set names = new HashSet<>(); private final List factories = new ArrayList<>(); + private final List reducerFactories = new ArrayList<>(); - public Builder add(AggregatorFactory factory) { + public Builder addAggregator(AggregatorFactory factory) { if (!names.add(factory.name)) { - throw new ElasticsearchIllegalArgumentException("Two sibling aggregations cannot have the same name: [" + factory.name + "]"); + throw new IllegalArgumentException("Two sibling aggregations cannot have the same name: [" + factory.name + "]"); } factories.add(factory); return this; } + public Builder addReducer(ReducerFactory reducerFactory) { + this.reducerFactories.add(reducerFactory); + return this; + } + public AggregatorFactories build() { - if (factories.isEmpty()) { + if (factories.isEmpty() && reducerFactories.isEmpty()) { return EMPTY; } - return new AggregatorFactories(factories.toArray(new AggregatorFactory[factories.size()])); + List orderedReducers = resolveReducerOrder(this.reducerFactories, this.factories); + return new AggregatorFactories(factories.toArray(new AggregatorFactory[factories.size()]), orderedReducers); + } + + private List resolveReducerOrder(List reducerFactories, List aggFactories) { + Map reducerFactoriesMap = new HashMap<>(); + for (ReducerFactory factory : reducerFactories) { + reducerFactoriesMap.put(factory.getName(), factory); + } + Set aggFactoryNames = new HashSet<>(); + for (AggregatorFactory aggFactory : aggFactories) { + aggFactoryNames.add(aggFactory.name); + } + List orderedReducers = new LinkedList<>(); + List unmarkedFactories = new ArrayList(reducerFactories); + Set temporarilyMarked = new HashSet(); + while (!unmarkedFactories.isEmpty()) { + ReducerFactory factory = unmarkedFactories.get(0); + resolveReducerOrder(aggFactoryNames, reducerFactoriesMap, orderedReducers, unmarkedFactories, temporarilyMarked, factory); + } + return orderedReducers; + } + + private void resolveReducerOrder(Set aggFactoryNames, Map reducerFactoriesMap, + List orderedReducers, List unmarkedFactories, Set temporarilyMarked, + ReducerFactory factory) { + if (temporarilyMarked.contains(factory)) { + throw new IllegalStateException("Cyclical dependency found with reducer [" + factory.getName() + "]"); + } else if (unmarkedFactories.contains(factory)) { + temporarilyMarked.add(factory); + String[] bucketsPaths = factory.getBucketsPaths(); + for (String bucketsPath : bucketsPaths) { + List bucketsPathElements = AggregationPath.parse(bucketsPath).getPathElementsAsStringList(); + String
firstAggName = bucketsPathElements.get(0); + if (bucketsPath.equals("_count") || bucketsPath.equals("_key") || aggFactoryNames.contains(firstAggName)) { + continue; + } else { + ReducerFactory matchingFactory = reducerFactoriesMap.get(firstAggName); + if (matchingFactory != null) { + resolveReducerOrder(aggFactoryNames, reducerFactoriesMap, orderedReducers, unmarkedFactories, + temporarilyMarked, matchingFactory); + } else { + throw new IllegalStateException("No aggregation found for path [" + bucketsPath + "]"); + } + } + } + unmarkedFactories.remove(factory); + temporarilyMarked.remove(factory); + orderedReducers.add(factory); + } } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java index 256700bada5..f69e54ee710 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactory.java @@ -23,10 +23,12 @@ import org.apache.lucene.search.Scorer; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ObjectArray; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.internal.SearchContext.Lifetime; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -64,6 +66,10 @@ public abstract class AggregatorFactory { return this; } + public String name() { + return name; + } + /** * Validates the state of this factory (makes sure the factory is properly configured) */ @@ -79,7 +85,8 @@ public abstract class AggregatorFactory { return parent; } - protected abstract Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException; + protected abstract Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, + List reducers, Map metaData) throws IOException; /** * Creates the aggregator @@ -92,7 +99,7 @@ public abstract class AggregatorFactory { * @return The created aggregator */ public final Aggregator create(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket) throws IOException { - return createInternal(context, parent, collectsFromSingleBucket, this.metaData); + return createInternal(context, parent, collectsFromSingleBucket, this.factories.createReducers(), this.metaData); } public void doValidate() { @@ -102,6 +109,8 @@ public abstract class AggregatorFactory { this.metaData = metaData; } + + /** * Utility method. Given an {@link AggregatorFactory} that creates {@link Aggregator}s that only know how * to collect bucket 0, this returns an aggregator that can collect any bucket. 
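resolveReducerOrder(...) in the AggregatorFactories hunks above is a depth-first topological sort: a reducer must be reduced after every reducer its buckets_path refers to, and the temporarilyMarked set detects cycles. A standalone sketch of the same strategy over plain names, with hypothetical simplified types (the real code walks ReducerFactory objects and AggregationPath elements, and treats _count, _key, and references to ordinary aggregations as already satisfied):

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

class ReducerOrdering {
    /**
     * Orders reducer names so that every reducer appears after the reducers
     * it depends on. deps maps a reducer name to the reducer names referenced
     * by its buckets_path entries (references to ordinary aggregations are
     * assumed to be filtered out already, as the real code does).
     */
    static List<String> order(Map<String, List<String>> deps) {
        List<String> ordered = new ArrayList<>();
        Set<String> done = new HashSet<>();
        Set<String> inProgress = new HashSet<>(); // the "temporary mark"
        for (String name : deps.keySet()) {
            visit(name, deps, ordered, done, inProgress);
        }
        return ordered;
    }

    private static void visit(String name, Map<String, List<String>> deps, List<String> ordered,
            Set<String> done, Set<String> inProgress) {
        if (done.contains(name)) {
            return; // already emitted
        }
        if (!inProgress.add(name)) {
            // re-entered a node we are still expanding: a cycle
            throw new IllegalStateException("Cyclical dependency found with reducer [" + name + "]");
        }
        if (deps.containsKey(name)) {
            for (String dependency : deps.get(name)) {
                visit(dependency, deps, ordered, done, inProgress);
            }
        }
        inProgress.remove(name);
        done.add(name);
        ordered.add(name); // dependencies were added first
    }
}

So, for example, a derivative reducer whose buckets_path reads the output of a moving-average reducer sorts after it, while two reducers that read each other fail fast with the cycle error.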
diff --git a/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java b/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java index b55f6a4f022..32969104733 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java +++ b/src/main/java/org/elasticsearch/search/aggregations/AggregatorParsers.java @@ -19,10 +19,13 @@ package org.elasticsearch.search.aggregations; import com.google.common.collect.ImmutableMap; + import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseException; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; @@ -37,21 +40,33 @@ import java.util.regex.Pattern; public class AggregatorParsers { public static final Pattern VALID_AGG_NAME = Pattern.compile("[^\\[\\]>]+"); - private final ImmutableMap parsers; + private final ImmutableMap aggParsers; + private final ImmutableMap reducerParsers; /** * Constructs the AggregatorParsers out of all the given parsers * - * @param parsers The available aggregator parsers (dynamically injected by the {@link org.elasticsearch.search.aggregations.AggregationModule}). + * @param aggParsers + * The available aggregator parsers (dynamically injected by the + * {@link org.elasticsearch.search.aggregations.AggregationModule} + * ). + * @param reducerParsers + * The available reducer parsers (dynamically injected by the + * {@link org.elasticsearch.search.aggregations.AggregationModule}). */ @Inject - public AggregatorParsers(Set parsers) { - MapBuilder builder = MapBuilder.newMapBuilder(); - for (Aggregator.Parser parser : parsers) { - builder.put(parser.type(), parser); + public AggregatorParsers(Set aggParsers, Set reducerParsers) { + MapBuilder aggParsersBuilder = MapBuilder.newMapBuilder(); + for (Aggregator.Parser parser : aggParsers) { + aggParsersBuilder.put(parser.type(), parser); } - this.parsers = builder.immutableMap(); + this.aggParsers = aggParsersBuilder.immutableMap(); + MapBuilder reducerParsersBuilder = MapBuilder.newMapBuilder(); + for (Reducer.Parser parser : reducerParsers) { + reducerParsersBuilder.put(parser.type(), parser); + } + this.reducerParsers = reducerParsersBuilder.immutableMap(); } /** @@ -61,7 +73,18 @@ public class AggregatorParsers { * @return The parser associated with the given aggregation type. */ public Aggregator.Parser parser(String type) { - return parsers.get(type); + return aggParsers.get(type); + } + + /** + * Returns the parser that is registered under the given reducer type. + * + * @param type + * The reducer type + * @return The parser associated with the given reducer type.
+ */ + public Reducer.Parser reducer(String type) { + return reducerParsers.get(type); } /** @@ -86,32 +109,39 @@ public class AggregatorParsers { XContentParser.Token token = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token != XContentParser.Token.FIELD_NAME) { - throw new SearchParseException(context, "Unexpected token " + token + " in [aggs]: aggregations definitions must start with the name of the aggregation."); + throw new SearchParseException(context, "Unexpected token " + token + + " in [aggs]: aggregations definitions must start with the name of the aggregation.", parser.getTokenLocation()); } final String aggregationName = parser.currentName(); if (!validAggMatcher.reset(aggregationName).matches()) { - throw new SearchParseException(context, "Invalid aggregation name [" + aggregationName + "]. Aggregation names must be alpha-numeric and can only contain '_' and '-'"); + throw new SearchParseException(context, "Invalid aggregation name [" + aggregationName + + "]. Aggregation names must be alpha-numeric and can only contain '_' and '-'", parser.getTokenLocation()); } token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new SearchParseException(context, "Aggregation definition for [" + aggregationName + " starts with a [" + token + "], expected a [" + XContentParser.Token.START_OBJECT + "]."); + throw new SearchParseException(context, "Aggregation definition for [" + aggregationName + " starts with a [" + token + + "], expected a [" + XContentParser.Token.START_OBJECT + "].", parser.getTokenLocation()); } - AggregatorFactory factory = null; + AggregatorFactory aggFactory = null; + ReducerFactory reducerFactory = null; AggregatorFactories subFactories = null; Map metaData = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token != XContentParser.Token.FIELD_NAME) { - throw new SearchParseException(context, "Expected [" + XContentParser.Token.FIELD_NAME + "] under a [" + XContentParser.Token.START_OBJECT + "], but got a [" + token + "] in [" + aggregationName + "]"); + throw new SearchParseException(context, "Expected [" + XContentParser.Token.FIELD_NAME + "] under a [" + + XContentParser.Token.START_OBJECT + "], but got a [" + token + "] in [" + aggregationName + "]", + parser.getTokenLocation()); } final String fieldName = parser.currentName(); token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new SearchParseException(context, "Expected [" + XContentParser.Token.START_OBJECT + "] under [" + fieldName + "], but got a [" + token + "] in [" + aggregationName + "]"); + throw new SearchParseException(context, "Expected [" + XContentParser.Token.START_OBJECT + "] under [" + fieldName + + "], but got a [" + token + "] in [" + aggregationName + "]", parser.getTokenLocation()); } switch (fieldName) { @@ -121,39 +151,63 @@ public class AggregatorParsers { case "aggregations": case "aggs": if (subFactories != null) { - throw new SearchParseException(context, "Found two sub aggregation definitions under [" + aggregationName + "]"); + throw new SearchParseException(context, "Found two sub aggregation definitions under [" + aggregationName + "]", + parser.getTokenLocation()); } subFactories = parseAggregators(parser, context, level+1); break; default: - if (factory != null) { - throw new SearchParseException(context, "Found two aggregation type definitions in [" + aggregationName + "]: [" + factory.type + "] and [" + fieldName + "]"); + if (aggFactory 
!= null) { + throw new SearchParseException(context, "Found two aggregation type definitions in [" + aggregationName + "]: [" + + aggFactory.type + "] and [" + fieldName + "]", parser.getTokenLocation()); } + if (reducerFactory != null) { + // TODO we would need a .type property on reducers too for this error message? + throw new SearchParseException(context, "Found two aggregation type definitions in [" + aggregationName + "]: [" + + reducerFactory + "] and [" + fieldName + "]", parser.getTokenLocation()); + } + Aggregator.Parser aggregatorParser = parser(fieldName); if (aggregatorParser == null) { - throw new SearchParseException(context, "Could not find aggregator type [" + fieldName + "] in [" + aggregationName + "]"); + Reducer.Parser reducerParser = reducer(fieldName); + if (reducerParser == null) { + throw new SearchParseException(context, "Could not find aggregator type [" + fieldName + "] in [" + + aggregationName + "]", parser.getTokenLocation()); + } else { + reducerFactory = reducerParser.parse(aggregationName, parser, context); + } + } else { + aggFactory = aggregatorParser.parse(aggregationName, parser, context); } - factory = aggregatorParser.parse(aggregationName, parser, context); } } - if (factory == null) { - throw new SearchParseException(context, "Missing definition for aggregation [" + aggregationName + "]"); - } - + if (aggFactory == null && reducerFactory == null) { + throw new SearchParseException(context, "Missing definition for aggregation [" + aggregationName + "]", + parser.getTokenLocation()); + } else if (aggFactory != null) { + assert reducerFactory == null; if (metaData != null) { - factory.setMetaData(metaData); + aggFactory.setMetaData(metaData); } if (subFactories != null) { - factory.subFactories(subFactories); + aggFactory.subFactories(subFactories); } if (level == 0) { - factory.validate(); + aggFactory.validate(); } - factories.add(factory); + factories.addAggregator(aggFactory); + } else { + assert reducerFactory != null; + if (subFactories != null) { + throw new SearchParseException(context, "Aggregation [" + aggregationName + "] cannot define sub-aggregations", + parser.getTokenLocation()); + } + factories.addReducer(reducerFactory); + } } return factories.build(); diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java index 5fe69e74060..c9bb647c0f3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregation.java @@ -18,6 +18,9 @@ */ package org.elasticsearch.search.aggregations; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.Lists; + import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -28,6 +31,8 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilderString; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerStreams; import org.elasticsearch.search.aggregations.support.AggregationPath; import java.io.IOException; @@ -110,6 +115,8 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St protected Map metaData; + private List 
reducers; + /** Constructs an uninitialized aggregation (used for serialization) **/ protected InternalAggregation() {} @@ -118,8 +125,9 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St * * @param name The name of the aggregation. */ - protected InternalAggregation(String name, Map metaData) { + protected InternalAggregation(String name, List reducers, Map metaData) { this.name = name; + this.reducers = reducers; this.metaData = metaData; } @@ -139,7 +147,15 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St * try reusing an existing aggregation instance (typically the first in the given list) to save on redundant object * construction. */ - public abstract InternalAggregation reduce(List aggregations, ReduceContext reduceContext); + public final InternalAggregation reduce(List aggregations, ReduceContext reduceContext) { + InternalAggregation aggResult = doReduce(aggregations, reduceContext); + for (Reducer reducer : reducers) { + aggResult = reducer.reduce(aggResult, reduceContext); + } + return aggResult; + } + + public abstract InternalAggregation doReduce(List aggregations, ReduceContext reduceContext); @Override public Object getProperty(String path) { @@ -172,6 +188,10 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St return metaData; } + public List reducers() { + return reducers; + } + @Override public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(name); @@ -190,6 +210,11 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St public final void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeGenericValue(metaData); + out.writeVInt(reducers.size()); + for (Reducer reducer : reducers) { + out.writeBytesReference(reducer.type().stream()); + reducer.writeTo(out); + } doWriteTo(out); } @@ -199,6 +224,17 @@ public abstract class InternalAggregation implements Aggregation, ToXContent, St public final void readFrom(StreamInput in) throws IOException { name = in.readString(); metaData = in.readMap(); + int size = in.readVInt(); + if (size == 0) { + reducers = ImmutableList.of(); + } else { + reducers = Lists.newArrayListWithCapacity(size); + for (int i = 0; i < size; i++) { + BytesReference type = in.readBytesReference(); + Reducer reducer = ReducerStreams.stream(type).readResult(in); + reducers.add(reducer); + } + } doReadFrom(in); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java index 7537878ae04..ceefcae41b6 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java +++ b/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java @@ -25,7 +25,6 @@ import com.google.common.collect.Iterators; import com.google.common.collect.Lists; import com.google.common.collect.Maps; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -133,7 +132,7 @@ public class InternalAggregations implements Aggregations, ToXContent, Streamabl String aggName = path.get(0); InternalAggregation aggregation = get(aggName); if (aggregation == null) { -
throw new IllegalArgumentException("Cannot find an aggregation named [" + aggName + "]"); } return aggregation.getProperty(path.subList(1, path.size())); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java index 572680c9318..db2ac49bf38 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/InternalMultiBucketAggregation.java @@ -19,21 +19,44 @@ package org.elasticsearch.search.aggregations; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.util.List; import java.util.Map; -public abstract class InternalMultiBucketAggregation extends InternalAggregation implements MultiBucketsAggregation { +public abstract class InternalMultiBucketAggregation + extends InternalAggregation implements MultiBucketsAggregation { public InternalMultiBucketAggregation() { } - public InternalMultiBucketAggregation(String name, Map metaData) { - super(name, metaData); + public InternalMultiBucketAggregation(String name, List reducers, Map metaData) { + super(name, reducers, metaData); } + /** + * Create a new copy of this {@link Aggregation} with the same settings as + * this {@link Aggregation} and contains the provided buckets. + * + * @param buckets + * the buckets to use in the new {@link Aggregation} + * @return the new {@link Aggregation} + */ + public abstract A create(List buckets); + + /** + * Create a new {@link InternalBucket} using the provided prototype bucket + * and aggregations. 
+ * + * @param aggregations + * the aggregations for the new bucket + * @param prototype + * the bucket to use as a prototype + * @return the new bucket + */ + public abstract B createBucket(InternalAggregations aggregations, B prototype); + @Override public Object getProperty(List path) { if (path.isEmpty()) { @@ -58,18 +81,19 @@ public abstract class InternalMultiBucketAggregation extends InternalAggregation String aggName = path.get(0); if (aggName.equals("_count")) { if (path.size() > 1) { - throw new ElasticsearchIllegalArgumentException("_count must be the last element in the path"); + throw new InvalidAggregationPathException("_count must be the last element in the path"); } return getDocCount(); } else if (aggName.equals("_key")) { if (path.size() > 1) { - throw new ElasticsearchIllegalArgumentException("_key must be the last element in the path"); + throw new InvalidAggregationPathException("_key must be the last element in the path"); } return getKey(); } InternalAggregation aggregation = aggregations.get(aggName); if (aggregation == null) { - throw new ElasticsearchIllegalArgumentException("Cannot find an aggregation named [" + aggName + "] in [" + containingAggName + "]"); + throw new InvalidAggregationPathException("Cannot find an aggregation named [" + aggName + "] in [" + containingAggName + + "]"); } return aggregation.getProperty(path.subList(1, path.size())); } diff --git a/src/main/java/org/elasticsearch/ElasticsearchIllegalStateException.java b/src/main/java/org/elasticsearch/search/aggregations/InvalidAggregationPathException.java similarity index 71% rename from src/main/java/org/elasticsearch/ElasticsearchIllegalStateException.java rename to src/main/java/org/elasticsearch/search/aggregations/InvalidAggregationPathException.java index d837d699402..e2ab1f65245 100644 --- a/src/main/java/org/elasticsearch/ElasticsearchIllegalStateException.java +++ b/src/main/java/org/elasticsearch/search/aggregations/InvalidAggregationPathException.java @@ -17,22 +17,17 @@ * under the License. 
*/ -package org.elasticsearch; +package org.elasticsearch.search.aggregations; -/** - * - */ -public class ElasticsearchIllegalStateException extends ElasticsearchException { +import org.elasticsearch.ElasticsearchException; - public ElasticsearchIllegalStateException() { - super(null); - } +public class InvalidAggregationPathException extends ElasticsearchException { - public ElasticsearchIllegalStateException(String msg) { + public InvalidAggregationPathException(String msg) { super(msg); } - public ElasticsearchIllegalStateException(String msg, Throwable cause) { + public InvalidAggregationPathException(String msg, Throwable cause) { super(msg, cause); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/NonCollectingAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/NonCollectingAggregator.java index 33c4215e27a..9b64c647b38 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/NonCollectingAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/NonCollectingAggregator.java @@ -20,9 +20,11 @@ package org.elasticsearch.search.aggregations; import org.apache.lucene.index.LeafReaderContext; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -31,12 +33,14 @@ import java.util.Map; */ public abstract class NonCollectingAggregator extends AggregatorBase { - protected NonCollectingAggregator(String name, AggregationContext context, Aggregator parent, AggregatorFactories subFactories, Map metaData) throws IOException { - super(name, subFactories, context, parent, metaData); + protected NonCollectingAggregator(String name, AggregationContext context, Aggregator parent, AggregatorFactories subFactories, + List reducers, Map metaData) throws IOException { + super(name, subFactories, context, parent, reducers, metaData); } - protected NonCollectingAggregator(String name, AggregationContext context, Aggregator parent, Map metaData) throws IOException { - this(name, context, parent, AggregatorFactories.EMPTY, metaData); + protected NonCollectingAggregator(String name, AggregationContext context, Aggregator parent, List reducers, + Map metaData) throws IOException { + this(name, context, parent, AggregatorFactories.EMPTY, reducers, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java b/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java index a45b9b9857a..27f5cbcf0aa 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java +++ b/src/main/java/org/elasticsearch/search/aggregations/TransportAggregationModule.java @@ -59,6 +59,13 @@ import org.elasticsearch.search.aggregations.metrics.stats.extended.InternalExte import org.elasticsearch.search.aggregations.metrics.sum.InternalSum; import org.elasticsearch.search.aggregations.metrics.tophits.InternalTopHits; import org.elasticsearch.search.aggregations.metrics.valuecount.InternalValueCount; +import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.max.MaxBucketReducer; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.min.MinBucketReducer; +import 
org.elasticsearch.search.aggregations.reducers.derivative.DerivativeReducer; +import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgReducer; +import org.elasticsearch.search.aggregations.reducers.movavg.models.TransportMovAvgModelModule; /** * A module that registers all the transport streams for the addAggregation @@ -93,7 +100,7 @@ public class TransportAggregationModule extends AbstractModule implements SpawnM SignificantStringTerms.registerStreams(); SignificantLongTerms.registerStreams(); UnmappedSignificantTerms.registerStreams(); - InternalGeoHashGrid.registerStreams(); + InternalGeoHashGrid.registerStreams(); DoubleTerms.registerStreams(); UnmappedTerms.registerStreams(); InternalRange.registerStream(); @@ -106,10 +113,18 @@ public class TransportAggregationModule extends AbstractModule implements SpawnM InternalTopHits.registerStreams(); InternalGeoBounds.registerStream(); InternalChildren.registerStream(); + + // Reducers + DerivativeReducer.registerStreams(); + InternalSimpleValue.registerStreams(); + InternalBucketMetricValue.registerStreams(); + MaxBucketReducer.registerStreams(); + MinBucketReducer.registerStreams(); + MovAvgReducer.registerStreams(); } @Override public Iterable spawnModules() { - return ImmutableList.of(new TransportSignificantTermsHeuristicModule()); + return ImmutableList.of(new TransportSignificantTermsHeuristicModule(), new TransportMovAvgModelModule()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java index f0c0294b4d7..8a379d1ad82 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/BestBucketsDeferringCollector.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.aggregations.bucket; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.util.packed.PackedInts; import org.apache.lucene.util.packed.PackedLongValues; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.LongHash; @@ -71,7 +70,7 @@ public class BestBucketsDeferringCollector extends DeferringBucketCollector { @Override public boolean needsScores() { if (collector == null) { - throw new ElasticsearchIllegalStateException(); + throw new IllegalStateException(); } return collector.needsScores(); } @@ -127,10 +126,10 @@ public class BestBucketsDeferringCollector extends DeferringBucketCollector { @Override public void prepareSelectedBuckets(long... 
selectedBuckets) throws IOException { if (!finished) { - throw new ElasticsearchIllegalStateException("Cannot replay yet, collection is not finished: postCollect() has not been called"); + throw new IllegalStateException("Cannot replay yet, collection is not finished: postCollect() has not been called"); } if (this.selectedBuckets != null) { - throw new ElasticsearchIllegalStateException("Already been replayed"); + throw new IllegalStateException("Already been replayed"); } final LongHash hash = new LongHash(selectedBuckets.length, BigArrays.NON_RECYCLING_INSTANCE); @@ -141,7 +140,7 @@ public class BestBucketsDeferringCollector extends DeferringBucketCollector { collector.preCollection(); if (collector.needsScores()) { - throw new ElasticsearchIllegalStateException("Cannot defer if scores are needed"); + throw new IllegalStateException("Cannot defer if scores are needed"); } for (Entry entry : entries) { @@ -176,11 +175,11 @@ public class BestBucketsDeferringCollector extends DeferringBucketCollector { @Override public InternalAggregation buildAggregation(long bucket) throws IOException { if (selectedBuckets == null) { - throw new ElasticsearchIllegalStateException("Collection has not been replayed yet."); + throw new IllegalStateException("Collection has not been replayed yet."); } final long rebasedBucket = selectedBuckets.find(bucket); if (rebasedBucket == -1) { - throw new ElasticsearchIllegalStateException("Cannot build for a bucket which has not been collected"); + throw new IllegalStateException("Cannot build for a bucket which has not been collected"); } return in.buildAggregation(rebasedBucket); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/BestDocsDeferringCollector.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/BestDocsDeferringCollector.java index 437e642d7e6..95bc28ef35a 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/BestDocsDeferringCollector.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/BestDocsDeferringCollector.java @@ -27,7 +27,6 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopScoreDocCollector; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.search.aggregations.BucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollector; @@ -122,10 +121,10 @@ public class BestDocsDeferringCollector extends DeferringBucketCollector { @Override public void prepareSelectedBuckets(long... 
selectedBuckets) throws IOException { if (!finished) { - throw new ElasticsearchIllegalStateException("Cannot replay yet, collection is not finished: postCollect() has not been called"); + throw new IllegalStateException("Cannot replay yet, collection is not finished: postCollect() has not been called"); } if (selectedBuckets.length > 1) { - throw new ElasticsearchIllegalStateException("Collection only supported on a single bucket"); + throw new IllegalStateException("Collection only supported on a single bucket"); } deferred.preCollection(); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java index e4d0260cf93..041c15a5dc1 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/BucketsAggregator.java @@ -27,10 +27,12 @@ import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; import java.util.Arrays; +import java.util.List; import java.util.Map; /** @@ -41,9 +43,9 @@ public abstract class BucketsAggregator extends AggregatorBase { private final BigArrays bigArrays; private IntArray docCounts; - public BucketsAggregator(String name, AggregatorFactories factories, - AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, factories, context, parent, metaData); + public BucketsAggregator(String name, AggregatorFactories factories, AggregationContext context, Aggregator parent, + List reducers, Map metaData) throws IOException { + super(name, factories, context, parent, reducers, metaData); bigArrays = context.bigArrays(); docCounts = bigArrays.newIntArray(1, true); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java index b0f2693e9eb..0b51a53be81 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/DeferringBucketCollector.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.aggregations.bucket; import org.apache.lucene.index.LeafReaderContext; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.BucketCollector; import org.elasticsearch.search.aggregations.InternalAggregation; @@ -74,7 +73,7 @@ public abstract class DeferringBucketCollector extends BucketCollector { } @Override - public void close() throws ElasticsearchException { + public void close() { in.close(); } @@ -110,22 +109,22 @@ public abstract class DeferringBucketCollector extends BucketCollector { @Override public LeafBucketCollector getLeafCollector(LeafReaderContext ctx) throws IOException { - throw new ElasticsearchIllegalStateException( + throw new IllegalStateException( "Deferred collectors cannot be collected directly. 
They must be collected through the recording wrapper."); } @Override public void preCollection() throws IOException { - throw new ElasticsearchIllegalStateException( + throw new IllegalStateException( "Deferred collectors cannot be collected directly. They must be collected through the recording wrapper."); } @Override public void postCollection() throws IOException { - throw new ElasticsearchIllegalStateException( + throw new IllegalStateException( "Deferred collectors cannot be collected directly. They must be collected through the recording wrapper."); } } -} \ No newline at end of file +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java index f278be9f663..b9ff232b626 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/InternalSingleBucketAggregation.java @@ -18,12 +18,12 @@ */ package org.elasticsearch.search.aggregations.bucket; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; import java.util.ArrayList; @@ -47,8 +47,8 @@ public abstract class InternalSingleBucketAggregation extends InternalAggregatio * @param docCount The document count in the single bucket. * @param aggregations The already built sub-aggregations that are associated with the bucket. 
*/ - protected InternalSingleBucketAggregation(String name, long docCount, InternalAggregations aggregations, Map metaData) { - super(name, metaData); + protected InternalSingleBucketAggregation(String name, long docCount, InternalAggregations aggregations, List reducers, Map metaData) { + super(name, reducers, metaData); this.docCount = docCount; this.aggregations = aggregations; } @@ -69,7 +69,7 @@ public abstract class InternalSingleBucketAggregation extends InternalAggregatio protected abstract InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations); @Override - public InternalAggregation reduce(List aggregations, ReduceContext reduceContext) { + public InternalAggregation doReduce(List aggregations, ReduceContext reduceContext) { long docCount = 0L; List subAggregationsList = new ArrayList<>(aggregations.size()); for (InternalAggregation aggregation : aggregations) { @@ -89,13 +89,13 @@ public abstract class InternalSingleBucketAggregation extends InternalAggregatio String aggName = path.get(0); if (aggName.equals("_count")) { if (path.size() > 1) { - throw new ElasticsearchIllegalArgumentException("_count must be the last element in the path"); + throw new IllegalArgumentException("_count must be the last element in the path"); } return getDocCount(); } InternalAggregation aggregation = aggregations.get(aggName); if (aggregation == null) { - throw new ElasticsearchIllegalArgumentException("Cannot find an aggregation named [" + aggName + "] in [" + getName() + "]"); + throw new IllegalArgumentException("Cannot find an aggregation named [" + aggName + "] in [" + getName() + "]"); } return aggregation.getProperty(path.subList(1, path.size())); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregator.java index d8b884a88e4..2e032640f98 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/SingleBucketAggregator.java @@ -20,9 +20,11 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -31,8 +33,9 @@ import java.util.Map; public abstract class SingleBucketAggregator extends BucketsAggregator { protected SingleBucketAggregator(String name, AggregatorFactories factories, - AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - super(name, factories, aggregationContext, parent, metaData); + AggregationContext aggregationContext, Aggregator parent, + List reducers, Map metaData) throws IOException { + super(name, factories, aggregationContext, parent, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenParser.java index 4834774053b..2f50bbf69ee 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ChildrenParser.java @@ -56,15 +56,18 @@ public class 
ChildrenParser implements Aggregator.Parser { if ("type".equals(currentFieldName)) { childType = parser.text(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } if (childType == null) { - throw new SearchParseException(context, "Missing [child_type] field for children aggregation [" + aggregationName + "]"); + throw new SearchParseException(context, "Missing [child_type] field for children aggregation [" + aggregationName + "]", + parser.getTokenLocation()); } ValuesSourceConfig config = new ValuesSourceConfig<>(ValuesSource.Bytes.WithOrdinals.ParentChild.class); @@ -76,13 +79,13 @@ public class ChildrenParser implements Aggregator.Parser { if (childDocMapper != null) { ParentFieldMapper parentFieldMapper = childDocMapper.parentFieldMapper(); if (!parentFieldMapper.active()) { - throw new SearchParseException(context, "[children] _parent field not configured"); + throw new SearchParseException(context, "[children] _parent field not configured", parser.getTokenLocation()); } parentType = parentFieldMapper.type(); DocumentMapper parentDocMapper = context.mapperService().documentMapper(parentType); if (parentDocMapper != null) { - parentFilter = context.filterCache().cache(parentDocMapper.typeFilter(), null, context.queryParserService().autoFilterCachePolicy()); - childFilter = context.filterCache().cache(childDocMapper.typeFilter(), null, context.queryParserService().autoFilterCachePolicy()); + parentFilter = parentDocMapper.typeFilter(); + childFilter = childDocMapper.typeFilter(); ParentChildIndexFieldData parentChildIndexFieldData = context.fieldData().getForField(parentFieldMapper); config.fieldContext(new FieldContext(parentFieldMapper.names().indexName(), parentChildIndexFieldData, parentFieldMapper)); } else { diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/InternalChildren.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/InternalChildren.java index 427637b9da7..cfac7f834bc 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/InternalChildren.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/InternalChildren.java @@ -23,8 +23,10 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -49,8 +51,9 @@ public class InternalChildren extends InternalSingleBucketAggregation implements public InternalChildren() { } - public InternalChildren(String name, long docCount, InternalAggregations aggregations, Map metaData) { - super(name, docCount, aggregations, metaData); + public InternalChildren(String name, long docCount, InternalAggregations aggregations, List reducers, + Map metaData) { + super(name, docCount, 
aggregations, reducers, metaData); } @Override @@ -60,6 +63,6 @@ public class InternalChildren extends InternalSingleBucketAggregation implements @Override protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) { - return new InternalChildren(name, docCount, subAggregations, getMetaData()); + return new InternalChildren(name, docCount, subAggregations, reducers(), getMetaData()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java index 40db0d054c1..e456e93c8a1 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/children/ParentToChildrenAggregator.java @@ -20,33 +20,42 @@ package org.elasticsearch.search.aggregations.bucket.children; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.SortedDocValues; -import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.docset.DocIdSets; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.common.util.LongObjectPagedHashMap; import org.elasticsearch.index.search.child.ConstantScorer; -import org.elasticsearch.search.aggregations.*; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.LeafBucketCollector; +import org.elasticsearch.search.aggregations.NonCollectingAggregator; import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import java.io.IOException; -import java.util.*; +import java.util.Arrays; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; // The RecordingPerReaderBucketCollector assumes per segment recording which isn't the case for this // aggregation, for this reason that collector can't be used public class ParentToChildrenAggregator extends SingleBucketAggregator { private final String parentType; - private final Filter childFilter; - private final Filter parentFilter; + private final Weight childFilter; + private final Weight parentFilter; private final ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource; // Maybe use PagedGrowableWriter? This will be less wasteful than LongArray, but then we don't have the reuse feature of BigArrays. 
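The remaining hunks of this file, like the FilterAggregator and FiltersAggregator changes further down, move from cached Filter DocIdSets to Lucene Weights built once per search with createNormalizedWeight(...), resolving a per-segment Scorer where matching docs are needed. A hedged sketch of that access pattern, assuming the Lucene 5.x signatures used on this branch (the two-argument createNormalizedWeight and Weight.scorer(LeafReaderContext, Bits), both of which appear in the hunks); the helper name forEachMatch is illustrative:

import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.Weight;

class WeightSketch {
    static void forEachMatch(IndexSearcher searcher, Query filter) throws IOException {
        // Build the Weight once per search; 'false' because no scores are needed.
        Weight weight = searcher.createNormalizedWeight(filter, false);
        for (LeafReaderContext ctx : searcher.getIndexReader().leaves()) {
            // 'null' accept-docs, matching the aggregator code above: deleted
            // docs are not filtered out at this point.
            Scorer scorer = weight.scorer(ctx, null);
            if (scorer == null) {
                continue; // no matching docs in this segment
            }
            for (int doc = scorer.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = scorer.nextDoc()) {
                // 'doc' is a segment-local doc id matching the filter
            }
        }
    }
}

Where random access is wanted instead of iteration (as in FilterAggregator.getLeafCollector), the patch wraps the Scorer with DocIdSets.asSequentialAccessBits(maxDoc, scorer) rather than iterating it directly.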
@@ -64,12 +73,13 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { public ParentToChildrenAggregator(String name, AggregatorFactories factories, AggregationContext aggregationContext, Aggregator parent, String parentType, Filter childFilter, Filter parentFilter, - ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource, long maxOrd, Map metaData) throws IOException { - super(name, factories, aggregationContext, parent, metaData); + ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource, + long maxOrd, List reducers, Map metaData) throws IOException { + super(name, factories, aggregationContext, parent, reducers, metaData); this.parentType = parentType; // these two filters are cached in the parser - this.childFilter = childFilter; - this.parentFilter = parentFilter; + this.childFilter = aggregationContext.searchContext().searcher().createNormalizedWeight(childFilter, false); + this.parentFilter = aggregationContext.searchContext().searcher().createNormalizedWeight(parentFilter, false); this.parentOrdToBuckets = aggregationContext.bigArrays().newLongArray(maxOrd, false); this.parentOrdToBuckets.fill(0, maxOrd, -1); this.parentOrdToOtherBuckets = new LongObjectPagedHashMap<>(aggregationContext.bigArrays()); @@ -78,12 +88,13 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { @Override public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { - return new InternalChildren(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), metaData()); + return new InternalChildren(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), reducers(), + metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalChildren(name, 0, buildEmptySubAggregations(), metaData()); + return new InternalChildren(name, 0, buildEmptySubAggregations(), reducers(), metaData()); } @Override @@ -93,18 +104,14 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { return LeafBucketCollector.NO_OP_COLLECTOR; } if (replay == null) { - throw new ElasticsearchIllegalStateException(); + throw new IllegalStateException(); } final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx); assert globalOrdinals != null; - DocIdSet parentDocIdSet = parentFilter.getDocIdSet(ctx, null); - // The DocIdSets.toSafeBits(...) can convert to FixedBitSet, but this - // will only happen if the none filter cache is used. (which only happens in tests) - // Otherwise the filter cache will produce a bitset based filter. 
- final Bits parentDocs = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), parentDocIdSet); - DocIdSet childDocIdSet = childFilter.getDocIdSet(ctx, null); - if (DocIdSets.isEmpty(childDocIdSet) == false) { + Scorer parentScorer = parentFilter.scorer(ctx, null); + final Bits parentDocs = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), parentScorer); + if (childFilter.scorer(ctx, null) != null) { replay.add(ctx); } return new LeafBucketCollector() { @@ -139,18 +146,14 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { this.replay = null; for (LeafReaderContext ctx : replay) { - final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx); - - final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx); - DocIdSet childDocIdSet = childFilter.getDocIdSet(ctx, ctx.reader().getLiveDocs()); - if (childDocIdSet == null) { - continue; - } - DocIdSetIterator childDocsIter = childDocIdSet.iterator(); + DocIdSetIterator childDocsIter = childFilter.scorer(ctx, ctx.reader().getLiveDocs()); if (childDocsIter == null) { continue; } + final LeafBucketCollector sub = collectableSubAggregators.getLeafCollector(ctx); + final SortedDocValues globalOrdinals = valuesSource.globalOrdinalsValues(parentType, ctx); + // Set the scorer, since we now replay only the child docIds sub.setScorer(ConstantScorer.create(childDocsIter, null, 1f)); @@ -193,21 +196,25 @@ public class ParentToChildrenAggregator extends SingleBucketAggregator { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new NonCollectingAggregator(name, aggregationContext, parent, metaData) { + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new NonCollectingAggregator(name, aggregationContext, parent, reducers, metaData) { @Override public InternalAggregation buildEmptyAggregation() { - return new InternalChildren(name, 0, buildEmptySubAggregations(), metaData()); + return new InternalChildren(name, 0, buildEmptySubAggregations(), reducers(), metaData()); } }; } @Override - protected Aggregator doCreateInternal(ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + protected Aggregator doCreateInternal(ValuesSource.Bytes.WithOrdinals.ParentChild valuesSource, + AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, List reducers, + Map metaData) throws IOException { long maxOrd = valuesSource.globalMaxOrd(aggregationContext.searchContext().searcher(), parentType); - return new ParentToChildrenAggregator(name, factories, aggregationContext, parent, parentType, childFilter, parentFilter, valuesSource, maxOrd, metaData); + return new ParentToChildrenAggregator(name, factories, aggregationContext, parent, parentType, childFilter, parentFilter, + valuesSource, maxOrd, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java index d5b15dba1ca..ed974279133 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java 
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java
index d5b15dba1ca..ed974279133 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregator.java
@@ -20,8 +20,11 @@ package org.elasticsearch.search.aggregations.bucket.filter;
 
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.Bits;
 import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.search.aggregations.AggregationExecutionException;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.AggregatorFactory;
@@ -29,9 +32,11 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.LeafBucketCollector;
 import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
 import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -39,24 +44,23 @@ import java.util.Map;
  */
 public class FilterAggregator extends SingleBucketAggregator {
 
-    private final Filter filter;
+    private final Weight filter;
 
     public FilterAggregator(String name,
-                            org.apache.lucene.search.Filter filter,
+                            Query filter,
                             AggregatorFactories factories,
                             AggregationContext aggregationContext,
-                            Aggregator parent,
+                            Aggregator parent, List<Reducer> reducers,
                             Map<String, Object> metaData) throws IOException {
-        super(name, factories, aggregationContext, parent, metaData);
-        this.filter = filter;
+        super(name, factories, aggregationContext, parent, reducers, metaData);
+        this.filter = aggregationContext.searchContext().searcher().createNormalizedWeight(filter, false);
     }
 
     @Override
     public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
-        // TODO: use the iterator if the filter does not support random access
         // no need to provide deleted docs to the filter
-        final Bits bits = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filter.getDocIdSet(ctx, null));
+        final Bits bits = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filter.scorer(ctx, null));
         return new LeafBucketCollectorBase(sub, null) {
             @Override
             public void collect(int doc, long bucket) throws IOException {
@@ -69,12 +73,13 @@ public class FilterAggregator extends SingleBucketAggregator {
 
     @Override
     public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
-        return new InternalFilter(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), metaData());
+        return new InternalFilter(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), reducers(),
+                metaData());
     }
 
     @Override
     public InternalAggregation buildEmptyAggregation() {
-        return new InternalFilter(name, 0, buildEmptySubAggregations(), metaData());
+        return new InternalFilter(name, 0, buildEmptySubAggregations(), reducers(), metaData());
     }
 
     public static class Factory extends AggregatorFactory {
@@ -87,8 +92,9 @@ public class FilterAggregator extends SingleBucketAggregator {
         }
 
         @Override
-        public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException {
-            return new FilterAggregator(name, filter, factories, context, parent, metaData);
+        public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket,
+                List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+            return new FilterAggregator(name, filter, factories, context, parent, reducers, metaData);
         }
     }
 }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java
index c3d84b9fe51..0429ea20a59 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilter.java
@@ -22,8 +22,10 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -48,8 +50,8 @@ public class InternalFilter extends InternalSingleBucketAggregation implements F
 
     InternalFilter() {} // for serialization
 
-    InternalFilter(String name, long docCount, InternalAggregations subAggregations, Map<String, Object> metaData) {
-        super(name, docCount, subAggregations, metaData);
+    InternalFilter(String name, long docCount, InternalAggregations subAggregations, List<Reducer> reducers, Map<String, Object> metaData) {
+        super(name, docCount, subAggregations, reducers, metaData);
     }
 
     @Override
@@ -59,6 +61,6 @@ public class InternalFilter extends InternalSingleBucketAggregation implements F
 
     @Override
     protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) {
-        return new InternalFilter(name, docCount, subAggregations, getMetaData());
+        return new InternalFilter(name, docCount, subAggregations, reducers(), getMetaData());
     }
 }
\ No newline at end of file
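FilterAggregator now compiles its query into a Weight once per search (createNormalizedWeight) and asks that Weight for a scorer per segment in getLeafCollector. A self-contained sketch of the same compile-once, score-per-segment shape; the types here are hypothetical stand-ins, not the Lucene API:

    // Two-phase shape the new FilterAggregator follows: expensive setup once
    // per search, then one cheap per-segment matcher. Stand-in types.
    import java.util.List;
    import java.util.function.IntPredicate;

    interface Query { Weight createWeight(); }             // compiled once per search
    interface Weight { IntPredicate scorer(int segment); } // asked once per segment

    final class TwoPhaseDemo {
        static long countMatches(Query query, List<int[]> segments) {
            Weight weight = query.createWeight(); // expensive work happens here
            long count = 0;
            for (int seg = 0; seg < segments.size(); seg++) {
                IntPredicate matches = weight.scorer(seg); // cheap per-segment object
                for (int doc : segments.get(seg)) {
                    if (matches.test(doc)) {
                        count++;
                    }
                }
            }
            return count;
        }

        public static void main(String[] args) {
            Query evenDocs = () -> seg -> doc -> doc % 2 == 0;
            System.out.println(countMatches(evenDocs, List.of(new int[]{0, 1, 2}, new int[]{3, 4}))); // 3
        }
    }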
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java
index b97a5442ced..267833a8d95 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersAggregator.java
@@ -22,9 +22,11 @@ package org.elasticsearch.search.aggregations.bucket.filters;
 
 import com.google.common.collect.Lists;
 import org.apache.lucene.index.LeafReaderContext;
-import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.Bits;
 import org.elasticsearch.common.lucene.docset.DocIdSets;
+import org.elasticsearch.search.aggregations.AggregationExecutionException;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.AggregatorFactory;
@@ -33,6 +35,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.LeafBucketCollector;
 import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
 import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 
 import java.io.IOException;
@@ -47,32 +50,39 @@ public class FiltersAggregator extends BucketsAggregator {
 
     static class KeyedFilter {
 
         final String key;
-        final Filter filter;
+        final Query filter;
 
-        KeyedFilter(String key, Filter filter) {
+        KeyedFilter(String key, Query filter) {
             this.key = key;
             this.filter = filter;
         }
     }
 
-    private final KeyedFilter[] filters;
+    private final String[] keys;
+    private final Weight[] filters;
     private final boolean keyed;
 
     public FiltersAggregator(String name, AggregatorFactories factories, List<KeyedFilter> filters, boolean keyed, AggregationContext aggregationContext,
-            Aggregator parent, Map<String, Object> metaData) throws IOException {
-        super(name, factories, aggregationContext, parent, metaData);
+            Aggregator parent, List<Reducer> reducers, Map<String, Object> metaData)
+            throws IOException {
+        super(name, factories, aggregationContext, parent, reducers, metaData);
         this.keyed = keyed;
-        this.filters = filters.toArray(new KeyedFilter[filters.size()]);
+        this.keys = new String[filters.size()];
+        this.filters = new Weight[filters.size()];
+        for (int i = 0; i < filters.size(); ++i) {
+            KeyedFilter keyedFilter = filters.get(i);
+            this.keys[i] = keyedFilter.key;
+            this.filters[i] = aggregationContext.searchContext().searcher().createNormalizedWeight(keyedFilter.filter, false);
+        }
     }
 
     @Override
     public LeafBucketCollector getLeafCollector(LeafReaderContext ctx, final LeafBucketCollector sub) throws IOException {
-        // TODO: use the iterator if the filter does not support random access
         // no need to provide deleted docs to the filter
         final Bits[] bits = new Bits[filters.length];
         for (int i = 0; i < filters.length; ++i) {
-            bits[i] = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].filter.getDocIdSet(ctx, null));
+            bits[i] = DocIdSets.asSequentialAccessBits(ctx.reader().maxDoc(), filters[i].scorer(ctx, null));
         }
         return new LeafBucketCollectorBase(sub, null) {
             @Override
@@ -89,24 +99,23 @@ public class FiltersAggregator extends BucketsAggregator {
 
     @Override
     public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
         List<InternalFilters.Bucket> buckets = Lists.newArrayListWithCapacity(filters.length);
-        for (int i = 0; i < filters.length; i++) {
-            KeyedFilter filter = filters[i];
+        for (int i = 0; i < keys.length; i++) {
             long bucketOrd = bucketOrd(owningBucketOrdinal, i);
-            InternalFilters.Bucket bucket = new InternalFilters.Bucket(filter.key, bucketDocCount(bucketOrd), bucketAggregations(bucketOrd), keyed);
+            InternalFilters.Bucket bucket = new InternalFilters.Bucket(keys[i], bucketDocCount(bucketOrd), bucketAggregations(bucketOrd), keyed);
             buckets.add(bucket);
         }
-        return new InternalFilters(name, buckets, keyed, metaData());
+        return new InternalFilters(name, buckets, keyed, reducers(), metaData());
     }
 
     @Override
     public InternalAggregation buildEmptyAggregation() {
         InternalAggregations subAggs = buildEmptySubAggregations();
         List<InternalFilters.Bucket> buckets = Lists.newArrayListWithCapacity(filters.length);
-        for (int i = 0; i < filters.length; i++) {
-            InternalFilters.Bucket bucket = new InternalFilters.Bucket(filters[i].key, 0, subAggs, keyed);
+        for (int i = 0; i < keys.length; i++) {
+            InternalFilters.Bucket bucket = new InternalFilters.Bucket(keys[i], 0, subAggs, keyed);
             buckets.add(bucket);
         }
-        return new InternalFilters(name, buckets, keyed, metaData());
+        return new InternalFilters(name, buckets, keyed, reducers(), metaData());
     }
 
     final long bucketOrd(long owningBucketOrdinal, int filterOrd) {
@@ -125,8 +134,9 @@ public class FiltersAggregator extends BucketsAggregator {
         }
 
         @Override
-        public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException {
-            return new FiltersAggregator(name, factories, filters, keyed, context, parent, metaData);
+        public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket,
+                List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+            return new FiltersAggregator(name, factories, filters, keyed, context, parent, reducers, metaData);
         }
     }
 }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersParser.java
index 49f43eafc36..e30fcc8a3a4 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersParser.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/FiltersParser.java
@@ -65,7 +65,8 @@ public class FiltersParser implements Aggregator.Parser {
                     }
                 }
             } else {
-                throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+                throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: ["
+                        + currentFieldName + "].", parser.getTokenLocation());
             }
         } else if (token == XContentParser.Token.START_ARRAY) {
             if ("filters".equals(currentFieldName)) {
@@ -78,10 +79,12 @@ public class FiltersParser implements Aggregator.Parser {
                     idx++;
                 }
             } else {
-                throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+                throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: ["
+                        + currentFieldName + "].", parser.getTokenLocation());
             }
         } else {
-            throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+            throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: ["
+                    + currentFieldName + "].", parser.getTokenLocation());
         }
     }
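Splitting KeyedFilter[] into parallel keys[] and Weight[] arrays keeps the hot per-segment loop free of wrapper objects. The bucketOrd(owningBucketOrdinal, filterOrd) helper used above is presumably the usual row-major layout, where every owning bucket gets one contiguous slot per filter, as in this sketch:

    // Presumed row-major bucket-ordinal layout behind bucketOrd(owningBucketOrdinal, i).
    final class BucketOrdinals {
        static long bucketOrd(long owningBucketOrdinal, int filterOrd, int numFilters) {
            return owningBucketOrdinal * numFilters + filterOrd;
        }

        public static void main(String[] args) {
            int numFilters = 3;
            long ord = bucketOrd(2, 1, numFilters); // owning bucket 2, filter 1
            System.out.println(ord);                 // 7
            System.out.println("owning=" + (ord / numFilters) + " filter=" + (ord % numFilters));
        }
    }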
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java
index 91624557740..85477b0d70c 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/filters/InternalFilters.java
@@ -29,8 +29,10 @@ import org.elasticsearch.search.aggregations.Aggregations;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
+import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation.InternalBucket;
 import org.elasticsearch.search.aggregations.bucket.BucketStreamContext;
 import org.elasticsearch.search.aggregations.bucket.BucketStreams;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -41,7 +43,7 @@ import java.util.Map;
 /**
  *
  */
-public class InternalFilters extends InternalMultiBucketAggregation implements Filters {
+public class InternalFilters extends InternalMultiBucketAggregation<InternalFilters, InternalFilters.Bucket> implements Filters {
 
     public final static Type TYPE = new Type("filters");
 
@@ -163,8 +165,8 @@ public class InternalFilters extends InternalMultiBucketAggregation implements F
 
     public InternalFilters() {} // for serialization
 
-    public InternalFilters(String name, List<Bucket> buckets, boolean keyed, Map<String, Object> metaData) {
-        super(name, metaData);
+    public InternalFilters(String name, List<Bucket> buckets, boolean keyed, List<Reducer> reducers, Map<String, Object> metaData) {
+        super(name, reducers, metaData);
         this.buckets = buckets;
         this.keyed = keyed;
     }
@@ -174,6 +176,16 @@ public class InternalFilters extends InternalMultiBucketAggregation implements F
         return TYPE;
     }
 
+    @Override
+    public InternalFilters create(List<Bucket> buckets) {
+        return new InternalFilters(this.name, buckets, this.keyed, this.reducers(), this.metaData);
+    }
+
+    @Override
+    public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
+        return new Bucket(prototype.key, prototype.docCount, aggregations, prototype.keyed);
+    }
+
     @Override
     public List<Bucket> getBuckets() {
         return buckets;
@@ -191,7 +203,7 @@ public class InternalFilters extends InternalMultiBucketAggregation implements F
     }
 
     @Override
-    public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+    public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
         List<List<Bucket>> bucketsList = null;
         for (InternalAggregation aggregation : aggregations) {
             InternalFilters filters = (InternalFilters) aggregation;
@@ -210,7 +222,7 @@ public class InternalFilters extends InternalMultiBucketAggregation implements F
             }
         }
 
-        InternalFilters reduced = new InternalFilters(name, new ArrayList<Bucket>(bucketsList.size()), keyed, getMetaData());
+        InternalFilters reduced = new InternalFilters(name, new ArrayList<Bucket>(bucketsList.size()), keyed, reducers(), getMetaData());
         for (List<Bucket> sameRangeList : bucketsList) {
             reduced.buckets.add((sameRangeList.get(0)).reduce(sameRangeList, reduceContext));
         }
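The new create(List) and createBucket(aggregations, prototype) overrides give the reducer framework a way to clone an existing aggregation while swapping in rewritten buckets or sub-aggregations. A sketch of that prototype-copy pattern with simplified stand-in types:

    // Prototype-copy pattern behind create()/createBucket(): copy every field
    // from the prototype except the one being replaced. Stand-in types.
    import java.util.List;

    record Bucket(String key, long docCount, List<String> subAggs) {
        Bucket withSubAggs(List<String> newSubAggs) {      // like createBucket(aggregations, prototype)
            return new Bucket(key, docCount, newSubAggs);
        }
    }

    record FiltersAgg(String name, boolean keyed, List<Bucket> buckets) {
        FiltersAgg withBuckets(List<Bucket> newBuckets) {  // like create(buckets)
            return new FiltersAgg(name, keyed, newBuckets);
        }
    }

    final class PrototypeDemo {
        public static void main(String[] args) {
            FiltersAgg original = new FiltersAgg("f", true, List.of(new Bucket("a", 10, List.of())));
            Bucket rewritten = original.buckets().get(0).withSubAggs(List.of("derivative"));
            FiltersAgg copy = original.withBuckets(List.of(rewritten));
            System.out.println(copy); // same name/keyed, new buckets
        }
    }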
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java
index 7e9f4682207..36448a103c1 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java
@@ -28,12 +28,14 @@ import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.LeafBucketCollector;
 import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -49,8 +51,9 @@ public class GeoHashGridAggregator extends BucketsAggregator {
     private final LongHash bucketOrds;
 
     public GeoHashGridAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource,
-            int requiredSize, int shardSize, AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData) throws IOException {
-        super(name, factories, aggregationContext, parent, metaData);
+            int requiredSize, int shardSize, AggregationContext aggregationContext, Aggregator parent, List<Reducer> reducers,
+            Map<String, Object> metaData) throws IOException {
+        super(name, factories, aggregationContext, parent, reducers, metaData);
         this.valuesSource = valuesSource;
         this.requiredSize = requiredSize;
         this.shardSize = shardSize;
@@ -126,12 +129,12 @@ public class GeoHashGridAggregator extends BucketsAggregator {
             bucket.aggregations = bucketAggregations(bucket.bucketOrd);
             list[i] = bucket;
         }
-        return new InternalGeoHashGrid(name, requiredSize, Arrays.asList(list), metaData());
+        return new InternalGeoHashGrid(name, requiredSize, Arrays.asList(list), reducers(), metaData());
     }
 
     @Override
     public InternalGeoHashGrid buildEmptyAggregation() {
-        return new InternalGeoHashGrid(name, requiredSize, Collections.<InternalGeoHashGrid.Bucket>emptyList(), metaData());
+        return new InternalGeoHashGrid(name, requiredSize, Collections.<InternalGeoHashGrid.Bucket> emptyList(), reducers(), metaData());
     }
 
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridBuilder.java
index f2c058a8e1c..9f382d86906 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridBuilder.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridBuilder.java
@@ -18,7 +18,6 @@
  */
 package org.elasticsearch.search.aggregations.bucket.geogrid;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.aggregations.AggregationBuilder;
 
@@ -56,7 +55,7 @@ public class GeoHashGridBuilder extends AggregationBuilder {
      */
     public GeoHashGridBuilder precision(int precision) {
         if ((precision < 1) || (precision > 12)) {
-            throw new ElasticsearchIllegalArgumentException("Invalid geohash aggregation precision of " + precision
+            throw new IllegalArgumentException("Invalid geohash aggregation precision of " + precision
                     + "must be between 1 and 12");
         }
         this.precision = precision;
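GeoHashGridBuilder now fails fast with the JDK's own IllegalArgumentException. For orientation, geohash precision 1 corresponds to cells roughly 5,000 km across, and precision 12 to centimetre-scale cells. The same guard, extracted into a runnable sketch:

    // The validation contract: geohash precision must be in [1, 12].
    final class GeoHashPrecision {
        static int checkPrecision(int precision) {
            if (precision < 1 || precision > 12) {
                throw new IllegalArgumentException(
                        "Invalid geohash aggregation precision of " + precision + "; must be between 1 and 12");
            }
            return precision;
        }

        public static void main(String[] args) {
            System.out.println(checkPrecision(5)); // ok
            try {
                checkPrecision(13);
            } catch (IllegalArgumentException e) {
                System.out.println(e.getMessage());
            }
        }
    }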
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java
index 185e5e002f6..24b6d490c9f 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridParser.java
@@ -34,6 +34,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactory;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.NonCollectingAggregator;
 import org.elasticsearch.search.aggregations.bucket.BucketUtils;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
@@ -43,6 +44,7 @@ import org.elasticsearch.search.internal.SearchContext;
 
 import java.io.IOException;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -123,10 +125,11 @@ public class GeoHashGridParser implements Aggregator.Parser {
         }
 
         @Override
-        protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData) throws IOException {
-            final InternalAggregation aggregation = new InternalGeoHashGrid(name, requiredSize, Collections.<InternalGeoHashGrid.Bucket>emptyList(), metaData);
-            return new NonCollectingAggregator(name, aggregationContext, parent, metaData) {
-                @Override
+        protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List<Reducer> reducers,
+                Map<String, Object> metaData) throws IOException {
+            final InternalAggregation aggregation = new InternalGeoHashGrid(name, requiredSize,
+                    Collections.<InternalGeoHashGrid.Bucket> emptyList(), reducers, metaData);
+            return new NonCollectingAggregator(name, aggregationContext, parent, reducers, metaData) {
                 public InternalAggregation buildEmptyAggregation() {
                     return aggregation;
                 }
@@ -134,12 +137,15 @@ public class GeoHashGridParser implements Aggregator.Parser {
         }
 
         @Override
-        protected Aggregator doCreateInternal(final ValuesSource.GeoPoint valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException {
+        protected Aggregator doCreateInternal(final ValuesSource.GeoPoint valuesSource, AggregationContext aggregationContext,
+                Aggregator parent, boolean collectsFromSingleBucket, List<Reducer> reducers, Map<String, Object> metaData)
+                throws IOException {
             if (collectsFromSingleBucket == false) {
                 return asMultiBucketAggregator(this, aggregationContext, parent);
             }
             ValuesSource.Numeric cellIdSource = new CellIdSource(valuesSource, precision);
-            return new GeoHashGridAggregator(name, factories, cellIdSource, requiredSize, shardSize, aggregationContext, parent, metaData);
+            return new GeoHashGridAggregator(name, factories, cellIdSource, requiredSize, shardSize, aggregationContext, parent, reducers,
+                    metaData);
         }
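For an unmapped field, the factory builds the empty InternalGeoHashGrid once and hands it to a NonCollectingAggregator, which never touches documents. The pattern with stand-in types, not the Elasticsearch classes:

    // Pattern sketch: an "unmapped" aggregator precomputes its empty result
    // and never collects. Stand-in types.
    import java.util.List;

    interface Aggregator {
        void collect(int doc);
        Object buildAggregation();
    }

    final class NonCollecting implements Aggregator {
        private final Object empty;

        NonCollecting(Object precomputedEmpty) {
            this.empty = precomputedEmpty;
        }

        @Override
        public void collect(int doc) {
            // intentionally a no-op: nothing to collect for an unmapped field
        }

        @Override
        public Object buildAggregation() {
            return empty;
        }

        public static void main(String[] args) {
            Aggregator agg = new NonCollecting(List.of()); // empty bucket list built up front
            agg.collect(42);                               // ignored
            System.out.println(agg.buildAggregation());    // []
        }
    }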
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java
index 40ab098b624..c480aa667c7 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java
@@ -32,6 +32,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
 import org.elasticsearch.search.aggregations.bucket.BucketStreamContext;
 import org.elasticsearch.search.aggregations.bucket.BucketStreams;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -45,7 +46,8 @@ import java.util.Map;
  * All geohashes in a grid are of the same precision and held internally as a single long
  * for efficiency's sake.
  */
-public class InternalGeoHashGrid extends InternalMultiBucketAggregation implements GeoHashGrid {
+public class InternalGeoHashGrid extends InternalMultiBucketAggregation<InternalGeoHashGrid, InternalGeoHashGrid.Bucket> implements
+        GeoHashGrid {
 
     public static final Type TYPE = new Type("geohash_grid", "ghcells");
 
@@ -162,7 +164,6 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation implemen
             return builder;
         }
     }
-
     private int requiredSize;
     private Collection<Bucket> buckets;
     protected Map<Long, Bucket> bucketMap;
@@ -170,8 +171,9 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation implemen
 
     InternalGeoHashGrid() { } // for serialization
 
-    public InternalGeoHashGrid(String name, int requiredSize, Collection<Bucket> buckets, Map<String, Object> metaData) {
-        super(name, metaData);
+    public InternalGeoHashGrid(String name, int requiredSize, Collection<Bucket> buckets, List<Reducer> reducers,
+            Map<String, Object> metaData) {
+        super(name, reducers, metaData);
         this.requiredSize = requiredSize;
         this.buckets = buckets;
     }
@@ -181,6 +183,16 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation implemen
         return TYPE;
     }
 
+    @Override
+    public InternalGeoHashGrid create(List<Bucket> buckets) {
+        return new InternalGeoHashGrid(this.name, this.requiredSize, buckets, this.reducers(), this.metaData);
+    }
+
+    @Override
+    public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
+        return new Bucket(prototype.geohashAsLong, prototype.docCount, aggregations);
+    }
+
     @Override
     public List<GeoHashGrid.Bucket> getBuckets() {
         Object o = buckets;
@@ -188,7 +200,7 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation implemen
     }
 
     @Override
-    public InternalGeoHashGrid reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+    public InternalGeoHashGrid doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
 
         LongObjectPagedHashMap<List<Bucket>> buckets = null;
         for (InternalAggregation aggregation : aggregations) {
@@ -217,7 +229,7 @@ public class InternalGeoHashGrid extends InternalMultiBucketAggregation implemen
         for (int i = ordered.size() - 1; i >= 0; i--) {
             list[i] = ordered.pop();
         }
-        return new InternalGeoHashGrid(getName(), requiredSize, Arrays.asList(list), getMetaData());
+        return new InternalGeoHashGrid(getName(), requiredSize, Arrays.asList(list), reducers(), getMetaData());
     }
 
     @Override
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java
index 7862eade5d6..1fa3e95d876 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregator.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.search.aggregations.bucket.global;
 
 import org.apache.lucene.index.LeafReaderContext;
-import org.elasticsearch.ElasticsearchIllegalStateException;
 import org.elasticsearch.search.aggregations.AggregationExecutionException;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
@@ -28,9 +27,11 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.LeafBucketCollector;
 import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
 import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -38,8 +39,9 @@ import java.util.Map;
  */
 public class GlobalAggregator extends SingleBucketAggregator {
 
-    public GlobalAggregator(String name, AggregatorFactories subFactories, AggregationContext aggregationContext, Map<String, Object> metaData) throws IOException {
-        super(name, subFactories, aggregationContext, null, metaData);
+    public GlobalAggregator(String name, AggregatorFactories subFactories, AggregationContext aggregationContext, List<Reducer> reducers,
+            Map<String, Object> metaData) throws IOException {
+        super(name, subFactories, aggregationContext, null, reducers, metaData);
     }
 
     @Override
@@ -57,7 +59,8 @@ public class GlobalAggregator extends SingleBucketAggregator {
     @Override
     public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
         assert owningBucketOrdinal == 0 : "global aggregator can only be a top level aggregator";
-        return new InternalGlobal(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), metaData());
+        return new InternalGlobal(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), reducers(),
+                metaData());
     }
 
     @Override
@@ -72,15 +75,16 @@ public class GlobalAggregator extends SingleBucketAggregator {
         }
 
         @Override
-        public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException {
+        public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket,
+                List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
             if (parent != null) {
                 throw new AggregationExecutionException("Aggregation [" + parent.name() + "] cannot have a global " +
                         "sub-aggregation [" + name + "]. Global aggregations can only be defined as top level aggregations");
             }
             if (collectsFromSingleBucket == false) {
-                throw new ElasticsearchIllegalStateException();
+                throw new IllegalStateException();
            }
-            return new GlobalAggregator(name, factories, context, metaData);
+            return new GlobalAggregator(name, factories, context, reducers, metaData);
        }
    }
}
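createInternal still rejects `global` anywhere but the top level, and the collectsFromSingleBucket check now throws the plain JDK IllegalStateException. The two guards extracted into a small sketch:

    // Sketch of the two guards createInternal() enforces for `global`:
    // top-level only, and it only ever collects bucket 0.
    final class GlobalGuards {
        static void validate(String parentName, boolean collectsFromSingleBucket) {
            if (parentName != null) {
                throw new IllegalArgumentException("Aggregation [" + parentName + "] cannot have a global sub-aggregation");
            }
            if (!collectsFromSingleBucket) {
                throw new IllegalStateException(); // mirrors the bare IllegalStateException above
            }
        }

        public static void main(String[] args) {
            validate(null, true); // fine: top level, single bucket
            try {
                validate("terms_by_user", true);
            } catch (IllegalArgumentException e) {
                System.out.println(e.getMessage());
            }
        }
    }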
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/InternalGlobal.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/InternalGlobal.java
index 6e317f26952..157d2c5c7f9 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/global/InternalGlobal.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/global/InternalGlobal.java
@@ -22,8 +22,10 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -49,8 +51,8 @@ public class InternalGlobal extends InternalSingleBucketAggregation implements G
 
     InternalGlobal() {} // for serialization
 
-    InternalGlobal(String name, long docCount, InternalAggregations aggregations, Map<String, Object> metaData) {
-        super(name, docCount, aggregations, metaData);
+    InternalGlobal(String name, long docCount, InternalAggregations aggregations, List<Reducer> reducers, Map<String, Object> metaData) {
+        super(name, docCount, aggregations, reducers, metaData);
     }
 
     @Override
@@ -60,6 +62,6 @@ public class InternalGlobal extends InternalSingleBucketAggregation implements G
 
     @Override
     protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) {
-        return new InternalGlobal(name, docCount, subAggregations, getMetaData());
+        return new InternalGlobal(name, docCount, subAggregations, reducers(), getMetaData());
     }
 }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java
index 9d08d2ce81a..e2ce1cc4b09 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramParser.java
@@ -86,7 +86,7 @@ public class DateHistogramParser implements Aggregator.Parser {
                 .build();
 
         boolean keyed = false;
-        long minDocCount = 1;
+        long minDocCount = 0;
         ExtendedBounds extendedBounds = null;
         InternalOrder order = (InternalOrder) Histogram.Order.KEY_ASC;
         String interval = null;
@@ -108,13 +108,15 @@ public class DateHistogramParser implements Aggregator.Parser {
                 } else if (INTERVAL.match(currentFieldName)) {
                     interval = parser.text();
                 } else {
-                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: ["
+                            + currentFieldName + "].", parser.getTokenLocation());
                 }
             } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
                 if ("keyed".equals(currentFieldName)) {
                     keyed = parser.booleanValue();
                 } else {
-                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: ["
+                            + currentFieldName + "].", parser.getTokenLocation());
                 }
             } else if (token == XContentParser.Token.VALUE_NUMBER) {
                 if ("min_doc_count".equals(currentFieldName) || "minDocCount".equals(currentFieldName)) {
@@ -122,7 +124,8 @@ public class DateHistogramParser implements Aggregator.Parser {
                 } else if ("time_zone".equals(currentFieldName) || "timeZone".equals(currentFieldName)) {
                     timeZone = DateTimeZone.forOffsetHours(parser.intValue());
                 } else {
-                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: ["
+                            + currentFieldName + "].", parser.getTokenLocation());
                 }
             } else if (token == XContentParser.Token.START_OBJECT) {
                 if ("order".equals(currentFieldName)) {
@@ -147,7 +150,8 @@ public class DateHistogramParser implements Aggregator.Parser {
                         } else if ("max".equals(currentFieldName)) {
                             extendedBounds.maxAsStr = parser.text();
                         } else {
-                            throw new SearchParseException(context, "Unknown extended_bounds key for a " + token + " in aggregation [" + aggregationName + "]: [" + currentFieldName + "].");
+                            throw new SearchParseException(context, "Unknown extended_bounds key for a " + token + " in aggregation ["
+                                    + aggregationName + "]: [" + currentFieldName + "].", parser.getTokenLocation());
                         }
                     } else if (token == XContentParser.Token.VALUE_NUMBER) {
                         if ("min".equals(currentFieldName)) {
@@ -155,23 +159,28 @@ public class DateHistogramParser implements Aggregator.Parser {
                         } else if ("max".equals(currentFieldName)) {
                             extendedBounds.max = parser.longValue();
                         } else {
-                            throw new SearchParseException(context, "Unknown extended_bounds key for a " + token + " in aggregation [" + aggregationName + "]: [" + currentFieldName + "].");
+                            throw new SearchParseException(context, "Unknown extended_bounds key for a " + token + " in aggregation ["
+                                    + aggregationName + "]: [" + currentFieldName + "].", parser.getTokenLocation());
                         }
                     } else {
-                        throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+                        throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: ["
+                                + currentFieldName + "].", parser.getTokenLocation());
                     }
                 }
             } else {
-                throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+                throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: ["
+                        + currentFieldName + "].", parser.getTokenLocation());
             }
         } else {
-            throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+            throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].",
+                    parser.getTokenLocation());
        }
    }

    if (interval == null) {
-        throw new SearchParseException(context, "Missing required field [interval] for histogram aggregation [" + aggregationName + "]");
+        throw new SearchParseException(context,
+                "Missing required field [interval] for histogram aggregation [" + aggregationName + "]", parser.getTokenLocation());
    }

    TimeZoneRounding.Builder tzRoundingBuilder;
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java
index b041ef34fdb..c703058b699 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/ExtendedBounds.java
@@ -56,7 +56,7 @@ public class ExtendedBounds {
         }
         if (min != null && max != null && min.compareTo(max) > 0) {
             throw new SearchParseException(context, "[extended_bounds.min][" + min + "] cannot be greater than " +
-                    "[extended_bounds.max][" + max + "] for histogram aggregation [" + aggName + "]");
+                    "[extended_bounds.max][" + max + "] for histogram aggregation [" + aggName + "]", null);
         }
     }
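Every SearchParseException in these parsers now carries parser.getTokenLocation(), so a malformed request can report the offending line and column (ExtendedBounds passes null because no parser is in scope there). A stand-in sketch of threading a location through a parse error:

    // Stand-in sketch: carrying a token location through parse errors so the
    // message points at the exact spot in the request body.
    final class ParseLocation {
        final int line, column;
        ParseLocation(int line, int column) { this.line = line; this.column = column; }
        @Override public String toString() { return "line " + line + ", column " + column; }
    }

    final class ParseError extends RuntimeException {
        ParseError(String message, ParseLocation location) {
            // location may be null when none is available (compare ExtendedBounds above)
            super(location == null ? message : message + " (at " + location + ")");
        }
    }

    final class ParserDemo {
        public static void main(String[] args) {
            try {
                throw new ParseError("Unknown key [min_doc_cuont]", new ParseLocation(7, 13));
            } catch (ParseError e) {
                System.out.println(e.getMessage());
            }
        }
    }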
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java
index a39a488a615..44342366b3f 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregator.java
@@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.LeafBucketCollector;
 import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
 import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
@@ -56,15 +57,14 @@ public class HistogramAggregator extends BucketsAggregator {
     private final InternalHistogram.Factory histogramFactory;
 
     private final LongHash bucketOrds;
-    private SortedNumericDocValues values;
 
     public HistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, InternalOrder order,
             boolean keyed, long minDocCount, @Nullable ExtendedBounds extendedBounds,
             @Nullable ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter,
-            InternalHistogram.Factory histogramFactory,
-            AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData) throws IOException {
+            InternalHistogram.Factory histogramFactory, AggregationContext aggregationContext,
+            Aggregator parent, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
 
-        super(name, factories, aggregationContext, parent, metaData);
+        super(name, factories, aggregationContext, parent, reducers, metaData);
         this.rounding = rounding;
         this.order = order;
         this.keyed = keyed;
@@ -130,13 +130,14 @@ public class HistogramAggregator extends BucketsAggregator {
         // value source will be null for unmapped fields
         InternalHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? new InternalHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) : null;
-        return histogramFactory.create(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, metaData());
+        return histogramFactory.create(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, reducers(), metaData());
     }
 
     @Override
     public InternalAggregation buildEmptyAggregation() {
         InternalHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? new InternalHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) : null;
-        return histogramFactory.create(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed, metaData());
+        return histogramFactory.create(name, Collections.emptyList(), order, minDocCount, emptyBucketInfo, formatter, keyed, reducers(),
+                metaData());
     }
 
     @Override
@@ -166,13 +167,20 @@ public class HistogramAggregator extends BucketsAggregator {
             this.histogramFactory = histogramFactory;
         }
 
-        @Override
-        protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData) throws IOException {
-            return new HistogramAggregator(name, factories, rounding, order, keyed, minDocCount, null, null, config.formatter(), histogramFactory, aggregationContext, parent, metaData);
+        public long minDocCount() {
+            return minDocCount;
         }
 
         @Override
-        protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException {
+        protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List<Reducer> reducers,
+                Map<String, Object> metaData) throws IOException {
+            return new HistogramAggregator(name, factories, rounding, order, keyed, minDocCount, null, null, config.formatter(),
+                    histogramFactory, aggregationContext, parent, reducers, metaData);
+        }
+
+        @Override
+        protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent,
+                boolean collectsFromSingleBucket, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
             if (collectsFromSingleBucket == false) {
                 return asMultiBucketAggregator(this, aggregationContext, parent);
             }
@@ -185,7 +193,8 @@ public class HistogramAggregator extends BucketsAggregator {
                 extendedBounds.processAndValidate(name, aggregationContext.searchContext(), config.parser());
                 roundedBounds = extendedBounds.round(rounding);
             }
-            return new HistogramAggregator(name, factories, rounding, order, keyed, minDocCount, roundedBounds, valuesSource, config.formatter(), histogramFactory, aggregationContext, parent, metaData);
+            return new HistogramAggregator(name, factories, rounding, order, keyed, minDocCount, roundedBounds, valuesSource,
+                    config.formatter(), histogramFactory, aggregationContext, parent, reducers, metaData);
        }
    }
}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java
index f316237d734..ace6e6711c4 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramParser.java
@@ -52,7 +52,7 @@ public class HistogramParser implements Aggregator.Parser {
                 .build();
 
         boolean keyed = false;
-        long minDocCount = 1;
+        long minDocCount = 0;
         InternalOrder order = (InternalOrder) InternalOrder.KEY_ASC;
         long interval = -1;
         ExtendedBounds extendedBounds = null;
@@ -75,7 +75,8 @@ public class HistogramParser implements Aggregator.Parser {
                 } else if ("offset".equals(currentFieldName)) {
                     offset = parser.longValue();
                 } else {
-                    throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation [" + aggregationName + "]: [" + currentFieldName + "].");
+                    throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation [" + aggregationName + "]: ["
+                            + currentFieldName + "].", parser.getTokenLocation());
                 }
             } else if (token == XContentParser.Token.START_OBJECT) {
                 if ("order".equals(currentFieldName)) {
@@ -86,7 +87,8 @@ public class HistogramParser implements Aggregator.Parser {
                             String dir = parser.text();
                             boolean asc = "asc".equals(dir);
                             if (!asc && !"desc".equals(dir)) {
-                                throw new SearchParseException(context, "Unknown order direction [" + dir + "] in aggregation [" + aggregationName + "]. Should be either [asc] or [desc]");
+                                throw new SearchParseException(context, "Unknown order direction [" + dir + "] in aggregation ["
+                                        + aggregationName + "]. Should be either [asc] or [desc]", parser.getTokenLocation());
                             }
                             order = resolveOrder(currentFieldName, asc);
                         }
@@ -102,21 +104,25 @@ public class HistogramParser implements Aggregator.Parser {
                         } else if ("max".equals(currentFieldName)) {
                             extendedBounds.max = parser.longValue(true);
                         } else {
-                            throw new SearchParseException(context, "Unknown extended_bounds key for a " + token + " in aggregation [" + aggregationName + "]: [" + currentFieldName + "].");
+                            throw new SearchParseException(context, "Unknown extended_bounds key for a " + token + " in aggregation ["
+                                    + aggregationName + "]: [" + currentFieldName + "].", parser.getTokenLocation());
                         }
                     }
                 }
             }
         } else {
-            throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation [" + aggregationName + "]: [" + currentFieldName + "].");
+            throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation [" + aggregationName + "]: ["
+                    + currentFieldName + "].", parser.getTokenLocation());
        }
    } else {
-        throw new SearchParseException(context, "Unexpected token " + token + " in aggregation [" + aggregationName + "].");
+        throw new SearchParseException(context, "Unexpected token " + token + " in aggregation [" + aggregationName + "].",
+                parser.getTokenLocation());
    }
}

if (interval < 1) {
-    throw new SearchParseException(context, "Missing required field [interval] for histogram aggregation [" + aggregationName + "]");
+    throw new SearchParseException(context,
+            "Missing required field [interval] for histogram aggregation [" + aggregationName + "]", parser.getTokenLocation());
}

Rounding rounding = new Rounding.Interval(interval);
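Both histogram parsers now default min_doc_count to 0, so gaps between occupied keys surface as empty buckets in the response. A small sketch of how bucket keys are derived (floor to a multiple of the interval, which is what Rounding.Interval effectively does) and where the empty buckets come from:

    // Why min_doc_count = 0 matters: keys are values floored to a multiple of
    // the interval, and gaps between occupied keys become empty buckets.
    import java.util.TreeMap;

    final class HistogramDemo {
        public static void main(String[] args) {
            long interval = 10;
            long[] values = {3, 7, 25};

            TreeMap<Long, Long> counts = new TreeMap<>();
            for (long v : values) {
                long key = Math.floorDiv(v, interval) * interval; // floor to interval multiple
                counts.merge(key, 1L, Long::sum);
            }
            // with min_doc_count = 0, the key range is padded with empty buckets
            for (long key = counts.firstKey(); key <= counts.lastKey(); key += interval) {
                System.out.println(key + " -> " + counts.getOrDefault(key, 0L));
            }
            // prints: 0 -> 2, 10 -> 0, 20 -> 1
        }
    }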
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java
index 63cab59ad6b..a82a089066b 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java
@@ -19,16 +19,13 @@
 package org.elasticsearch.search.aggregations.bucket.histogram;
 
 import org.elasticsearch.common.Nullable;
+import org.elasticsearch.search.aggregations.AggregationExecutionException;
 import org.elasticsearch.search.aggregations.InternalAggregation.Type;
 import org.elasticsearch.search.aggregations.InternalAggregations;
-import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.EmptyBucketInfo;
 import org.elasticsearch.search.aggregations.support.format.ValueFormatter;
 import org.joda.time.DateTime;
 import org.joda.time.DateTimeZone;
 
-import java.util.List;
-import java.util.Map;
-
 /**
  *
  */
@@ -74,14 +71,20 @@ public class InternalDateHistogram {
     }
 
     @Override
-    public InternalHistogram create(String name, List<InternalDateHistogram.Bucket> buckets, InternalOrder order,
-            long minDocCount, EmptyBucketInfo emptyBucketInfo, @Nullable ValueFormatter formatter, boolean keyed, Map<String, Object> metaData) {
-        return new InternalHistogram(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, this, metaData);
+    public InternalDateHistogram.Bucket createBucket(InternalAggregations aggregations, InternalDateHistogram.Bucket prototype) {
+        return new Bucket(prototype.key, prototype.docCount, aggregations, prototype.getKeyed(), prototype.formatter, this);
     }
 
     @Override
-    public InternalDateHistogram.Bucket createBucket(long key, long docCount, InternalAggregations aggregations, boolean keyed, @Nullable ValueFormatter formatter) {
-        return new Bucket(key, docCount, aggregations, keyed, formatter, this);
+    public InternalDateHistogram.Bucket createBucket(Object key, long docCount, InternalAggregations aggregations, boolean keyed,
+            @Nullable ValueFormatter formatter) {
+        if (key instanceof Number) {
+            return new Bucket(((Number) key).longValue(), docCount, aggregations, keyed, formatter, this);
+        } else if (key instanceof DateTime) {
+            return new Bucket(((DateTime) key).getMillis(), docCount, aggregations, keyed, formatter, this);
+        } else {
+            throw new AggregationExecutionException("Expected key of type Number or DateTime but got [" + key + "]");
+        }
     }
 
     @Override
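createBucket now takes an Object key and normalizes it to epoch milliseconds, accepting either a Number or a Joda DateTime. The same dispatch sketched with pure JDK types, java.time.Instant standing in for DateTime:

    // Sketch of the key normalization above: accept a Number or a date object
    // and normalize to epoch millis. Instant is a stand-in for Joda's DateTime.
    import java.time.Instant;

    final class KeyNormalization {
        static long toMillis(Object key) {
            if (key instanceof Number n) {
                return n.longValue();
            } else if (key instanceof Instant i) {
                return i.toEpochMilli();
            } else {
                throw new IllegalArgumentException("Expected key of type Number or Instant but got [" + key + "]");
            }
        }

        public static void main(String[] args) {
            System.out.println(toMillis(1420070400000L));
            System.out.println(toMillis(Instant.parse("2015-01-01T00:00:00Z"))); // same instant
            try {
                toMillis("2015-01-01");
            } catch (IllegalArgumentException e) {
                System.out.println(e.getMessage());
            }
        }
    }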
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
index 491422d20cf..1934676ac40 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java
@@ -22,7 +22,6 @@ import com.google.common.collect.Lists;
 
 import org.apache.lucene.util.CollectionUtil;
 import org.apache.lucene.util.PriorityQueue;
-import org.elasticsearch.ElasticsearchIllegalStateException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -30,6 +29,7 @@ import org.elasticsearch.common.rounding.Rounding;
 import org.elasticsearch.common.text.StringText;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationExecutionException;
 import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.Aggregations;
 import org.elasticsearch.search.aggregations.InternalAggregation;
@@ -37,6 +37,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
 import org.elasticsearch.search.aggregations.bucket.BucketStreamContext;
 import org.elasticsearch.search.aggregations.bucket.BucketStreams;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.format.ValueFormatter;
 import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams;
 
@@ -50,7 +51,8 @@ import java.util.Map;
 /**
  * TODO should be renamed to InternalNumericHistogram (see comment on {@link Histogram})?
  */
-public class InternalHistogram<B extends InternalHistogram.Bucket> extends InternalMultiBucketAggregation implements Histogram {
+public class InternalHistogram<B extends InternalHistogram.Bucket> extends InternalMultiBucketAggregation<InternalHistogram<B>, B> implements
+        Histogram {
 
     final static Type TYPE = new Type("histogram", "histo");
 
@@ -68,7 +70,7 @@ public class InternalHistogram extends Inter
         public Bucket readResult(StreamInput in, BucketStreamContext context) throws IOException {
             Factory<?> factory = (Factory<?>) context.attributes().get("factory");
             if (factory == null) {
-                throw new ElasticsearchIllegalStateException("No factory found for histogram buckets");
+                throw new IllegalStateException("No factory found for histogram buckets");
             }
             Bucket histogram = new Bucket(context.keyed(), context.formatter(), factory);
             histogram.readFrom(in);
@@ -185,6 +187,14 @@ public class InternalHistogram extends Inter
             out.writeVLong(docCount);
             aggregations.writeTo(out);
         }
+
+        public ValueFormatter getFormatter() {
+            return formatter;
+        }
+
+        public boolean getKeyed() {
+            return keyed;
+        }
     }
 
     static class EmptyBucketInfo {
@@ -223,7 +233,7 @@ public class InternalHistogram extends Inter
 
     }
 
-    static class Factory<B extends InternalHistogram.Bucket> {
+    public static class Factory<B extends InternalHistogram.Bucket> {
 
         protected Factory() {
         }
@@ -233,12 +243,27 @@ public class InternalHistogram extends Inter
         }
 
         public InternalHistogram<B> create(String name, List<B> buckets, InternalOrder order, long minDocCount,
-                EmptyBucketInfo emptyBucketInfo, @Nullable ValueFormatter formatter, boolean keyed, Map<String, Object> metaData) {
-            return new InternalHistogram<>(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, this, metaData);
+                EmptyBucketInfo emptyBucketInfo, @Nullable ValueFormatter formatter, boolean keyed, List<Reducer> reducers,
+                Map<String, Object> metaData) {
+            return new InternalHistogram<>(name, buckets, order, minDocCount, emptyBucketInfo, formatter, keyed, this, reducers, metaData);
         }
 
-        public B createBucket(long key, long docCount, InternalAggregations aggregations, boolean keyed, @Nullable ValueFormatter formatter) {
-            return (B) new Bucket(key, docCount, keyed, formatter, this, aggregations);
+        public InternalHistogram<B> create(List<B> buckets, InternalHistogram<B> prototype) {
+            return new InternalHistogram<>(prototype.name, buckets, prototype.order, prototype.minDocCount, prototype.emptyBucketInfo,
+                    prototype.formatter, prototype.keyed, this, prototype.reducers(), prototype.metaData);
+        }
+
+        public B createBucket(InternalAggregations aggregations, B prototype) {
+            return (B) new Bucket(prototype.key, prototype.docCount, prototype.getKeyed(), prototype.formatter, this, aggregations);
+        }
+
+        public B createBucket(Object key, long docCount, InternalAggregations aggregations, boolean keyed,
+                @Nullable ValueFormatter formatter) {
+            if (key instanceof Number) {
+                return (B) new Bucket(((Number) key).longValue(), docCount, keyed, formatter, this, aggregations);
+            } else {
+                throw new AggregationExecutionException("Expected key of type Number but got [" + key + "]");
+            }
         }
 
         protected B createEmptyBucket(boolean keyed, @Nullable ValueFormatter formatter) {
@@ -259,8 +284,8 @@ public class InternalHistogram extends Inter
 
     InternalHistogram(String name, List<B> buckets, InternalOrder order, long minDocCount, EmptyBucketInfo emptyBucketInfo,
-            @Nullable ValueFormatter formatter, boolean keyed, Factory<B> factory, Map<String, Object> metaData) {
-        super(name, metaData);
+            @Nullable ValueFormatter formatter, boolean keyed, Factory<B> factory, List<Reducer> reducers, Map<String, Object> metaData) {
+        super(name, reducers, metaData);
         this.buckets = buckets;
         this.order = order;
         assert (minDocCount == 0) == (emptyBucketInfo != null);
@@ -281,10 +306,20 @@ public class InternalHistogram extends Inter
         return buckets;
     }
 
-    protected Factory<B> getFactory() {
+    public Factory<B> getFactory() {
         return factory;
     }
 
+    @Override
+    public InternalHistogram<B> create(List<B> buckets) {
+        return getFactory().create(buckets, this);
+    }
+
+    @Override
+    public B createBucket(InternalAggregations aggregations, B prototype) {
+        return getFactory().createBucket(aggregations, prototype);
+    }
+
     private static class IteratorAndCurrent<B> {
 
         private final Iterator<B> iterator;
@@ -411,7 +446,7 @@ public class InternalHistogram extends Inter
     }
 
     @Override
-    public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+    public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
         List<B> reducedBuckets = reduceBuckets(aggregations, reduceContext);
 
         // adding empty buckets if needed
@@ -431,7 +466,8 @@ public class InternalHistogram extends Inter
             CollectionUtil.introSort(reducedBuckets, order.comparator());
         }
 
-        return getFactory().create(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, formatter, keyed, getMetaData());
+        return getFactory().create(getName(), reducedBuckets, order, minDocCount, emptyBucketInfo, formatter, keyed, reducers(),
+                getMetaData());
     }
 
     @Override
@@ -461,7 +497,7 @@ public class InternalHistogram extends Inter
         } else if (factoryType.equals(TYPE.name())) {
             return new Factory<>();
         } else {
-            throw new ElasticsearchIllegalStateException("Invalid histogram factory type [" + factoryType + "]");
+            throw new IllegalStateException("Invalid histogram factory type [" + factoryType + "]");
        }
    }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/InternalMissing.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/InternalMissing.java
index d314e44e901..0245f117835 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/InternalMissing.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/InternalMissing.java
@@ -22,8 +22,10 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -50,8 +52,8 @@ public class InternalMissing extends InternalSingleBucketAggregation implements
     InternalMissing() {
     }
 
-    InternalMissing(String name, long docCount, InternalAggregations aggregations, Map<String, Object> metaData) {
-        super(name, docCount, aggregations, metaData);
+    InternalMissing(String name, long docCount, InternalAggregations aggregations, List<Reducer> reducers, Map<String, Object> metaData) {
+        super(name, docCount, aggregations, reducers, metaData);
     }
 
     @Override
@@ -61,6 +63,6 @@ public class InternalMissing extends InternalSingleBucketAggregation implements
     @Override
     protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) {
-        return new InternalMissing(name, docCount, subAggregations, getMetaData());
+        return new InternalMissing(name, docCount, subAggregations, reducers(), getMetaData());
     }
 }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java
index 1b65bde9904..b60c8510238 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregator.java
@@ -26,12 +26,14 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.LeafBucketCollector;
 import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
 import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
 import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -42,8 +44,9 @@ public class MissingAggregator extends SingleBucketAggregator {
     private final ValuesSource valuesSource;
 
     public MissingAggregator(String name, AggregatorFactories factories, ValuesSource valuesSource,
-            AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData) throws IOException {
-        super(name, factories, aggregationContext, parent, metaData);
+            AggregationContext aggregationContext, Aggregator parent, List<Reducer> reducers,
+            Map<String, Object> metaData) throws IOException {
+        super(name, factories, aggregationContext, parent, reducers, metaData);
         this.valuesSource = valuesSource;
     }
 
@@ -69,12 +72,13 @@ public class MissingAggregator extends SingleBucketAggregator {
 
     @Override
     public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
-        return new InternalMissing(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), metaData());
+        return new InternalMissing(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), reducers(),
+                metaData());
     }
 
     @Override
     public InternalAggregation buildEmptyAggregation() {
-        return new InternalMissing(name, 0, buildEmptySubAggregations(), metaData());
+        return new InternalMissing(name, 0, buildEmptySubAggregations(), reducers(), metaData());
     }
 
     public static class Factory extends ValuesSourceAggregatorFactory {
 
@@ -84,13 +88,15 @@ public class MissingAggregator extends SingleBucketAggregator {
         }
 
         @Override
-        protected MissingAggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData) throws IOException {
-            return new MissingAggregator(name, factories, null, aggregationContext, parent, metaData);
+        protected MissingAggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List<Reducer> reducers,
+                Map<String, Object> metaData) throws IOException {
+            return new MissingAggregator(name, factories, null, aggregationContext, parent, reducers, metaData);
         }
 
         @Override
-        protected MissingAggregator doCreateInternal(ValuesSource valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException {
-            return new MissingAggregator(name, factories, valuesSource, aggregationContext, parent, metaData);
+        protected MissingAggregator doCreateInternal(ValuesSource valuesSource, AggregationContext aggregationContext, Aggregator parent,
+                boolean collectsFromSingleBucket, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+            return new MissingAggregator(name, factories, valuesSource, aggregationContext, parent, reducers, metaData);
        }
    }
}
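Making InternalHistogram.Factory public lets the date variant reuse a single generic factory; the unchecked (B) cast is what allows one Bucket implementation to serve both, with the date subclass overriding createBucket. A compact sketch of that shared-factory shape:

    // Shared-factory shape: one generic factory serves the plain and date
    // variants; the base casts its concrete bucket to the type parameter.
    import java.util.List;

    class Bucket {
        final long key;
        Bucket(long key) { this.key = key; }
    }

    final class DateBucket extends Bucket {
        DateBucket(long key) { super(key); }
        @Override public String toString() { return "date:" + key; }
    }

    class Factory<B extends Bucket> {
        @SuppressWarnings("unchecked")
        B createBucket(long key) {
            return (B) new Bucket(key); // safe only because subclasses override
        }
    }

    final class DateFactory extends Factory<DateBucket> {
        @Override
        DateBucket createBucket(long key) {
            return new DateBucket(key);
        }

        public static void main(String[] args) {
            Factory<DateBucket> factory = new DateFactory();
            List<DateBucket> buckets = List.of(factory.createBucket(1420070400000L));
            System.out.println(buckets); // [date:1420070400000]
        }
    }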
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java
index b37de4c743c..6ecdc129dd0 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingParser.java
@@ -52,7 +52,8 @@ public class MissingParser implements Aggregator.Parser {
             } else if (vsParser.token(currentFieldName, token, parser)) {
                 continue;
             } else {
-                throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+                throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].",
+                        parser.getTokenLocation());
             }
         }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNested.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNested.java
index 8b434a3fd24..86ad26edab3 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNested.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalNested.java
@@ -22,8 +22,10 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -49,8 +51,9 @@ public class InternalNested extends InternalSingleBucketAggregation implements N
 
     public InternalNested() {
     }
 
-    public InternalNested(String name, long docCount, InternalAggregations aggregations, Map<String, Object> metaData) {
-        super(name, docCount, aggregations, metaData);
+    public InternalNested(String name, long docCount, InternalAggregations aggregations, List<Reducer> reducers,
+            Map<String, Object> metaData) {
+        super(name, docCount, aggregations, reducers, metaData);
     }
 
     @Override
@@ -60,6 +63,6 @@ public class InternalNested extends InternalSingleBucketAggregation implements N
 
     @Override
     protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) {
-        return new InternalNested(name, docCount, subAggregations, getMetaData());
+        return new InternalNested(name, docCount, subAggregations, reducers(), getMetaData());
     }
 }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalReverseNested.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalReverseNested.java
index eec7345d317..6dfaad42b03 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalReverseNested.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/InternalReverseNested.java
@@ -22,8 +22,10 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -49,8 +51,9 @@ public class InternalReverseNested extends InternalSingleBucketAggregation imple
 
     public InternalReverseNested() {
     }
 
-    public InternalReverseNested(String name, long docCount, InternalAggregations aggregations, Map<String, Object> metaData) {
-        super(name, docCount, aggregations, metaData);
+    public InternalReverseNested(String name, long docCount, InternalAggregations aggregations, List<Reducer> reducers,
+            Map<String, Object> metaData) {
+        super(name, docCount, aggregations, reducers, metaData);
     }
 
     @Override
@@ -60,6 +63,6 @@ public class InternalReverseNested extends InternalSingleBucketAggregation imple
 
     @Override
     protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) {
-        return new InternalReverseNested(name, docCount, subAggregations, getMetaData());
+        return new InternalReverseNested(name, docCount, subAggregations, reducers(), getMetaData());
     }
 }
objectMapper.nestedTypeFilter(); } @Override @@ -93,7 +93,7 @@ public class NestedAggregator extends SingleBucketAggregator { // aggs execution Filter parentFilterNotCached = findClosestNestedPath(parent()); if (parentFilterNotCached == null) { - parentFilterNotCached = NonNestedDocsFilter.INSTANCE; + parentFilterNotCached = Queries.newNonNestedFilter(); } parentFilter = context.searchContext().bitsetFilterCache().getBitDocIdSetFilter(parentFilterNotCached); BitDocIdSet parentSet = parentFilter.getDocIdSet(ctx); @@ -121,12 +121,13 @@ public class NestedAggregator extends SingleBucketAggregator { @Override public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { - return new InternalNested(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), metaData()); + return new InternalNested(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), reducers(), + metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalNested(name, 0, buildEmptySubAggregations(), metaData()); + return new InternalNested(name, 0, buildEmptySubAggregations(), reducers(), metaData()); } private static Filter findClosestNestedPath(Aggregator parent) { @@ -143,42 +144,42 @@ public class NestedAggregator extends SingleBucketAggregator { public static class Factory extends AggregatorFactory { private final String path; - private final QueryCachingPolicy queryCachingPolicy; - public Factory(String name, String path, QueryCachingPolicy queryCachingPolicy) { + public Factory(String name, String path) { super(name, InternalNested.TYPE.name()); this.path = path; - this.queryCachingPolicy = queryCachingPolicy; } @Override - public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, + List reducers, Map metaData) throws IOException { if (collectsFromSingleBucket == false) { return asMultiBucketAggregator(this, context, parent); } MapperService.SmartNameObjectMapper mapper = context.searchContext().smartNameObjectMapper(path); if (mapper == null) { - return new Unmapped(name, context, parent, metaData); + return new Unmapped(name, context, parent, reducers, metaData); } ObjectMapper objectMapper = mapper.mapper(); if (objectMapper == null) { - return new Unmapped(name, context, parent, metaData); + return new Unmapped(name, context, parent, reducers, metaData); } if (!objectMapper.nested().isNested()) { throw new AggregationExecutionException("[nested] nested path [" + path + "] is not nested"); } - return new NestedAggregator(name, factories, objectMapper, context, parent, metaData, queryCachingPolicy); + return new NestedAggregator(name, factories, objectMapper, context, parent, reducers, metaData); } private final static class Unmapped extends NonCollectingAggregator { - public Unmapped(String name, AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + public Unmapped(String name, AggregationContext context, Aggregator parent, List reducers, Map metaData) + throws IOException { + super(name, context, parent, reducers, metaData); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalNested(name, 0, buildEmptySubAggregations(), metaData()); + return new InternalNested(name, 0, 
buildEmptySubAggregations(), reducers(), metaData()); } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java index 61044fb4a28..ddf6bf17b6e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedParser.java @@ -49,18 +49,21 @@ public class NestedParser implements Aggregator.Parser { if ("path".equals(currentFieldName)) { path = parser.text(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } if (path == null) { // "field" doesn't exist, so we fall back to the context of the ancestors - throw new SearchParseException(context, "Missing [path] field for nested aggregation [" + aggregationName + "]"); + throw new SearchParseException(context, "Missing [path] field for nested aggregation [" + aggregationName + "]", + parser.getTokenLocation()); } - return new NestedAggregator.Factory(aggregationName, path, context.queryParserService().autoFilterCachePolicy()); + return new NestedAggregator.Factory(aggregationName, path); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java index 4dbeec5898f..a81aae9eef6 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregator.java @@ -27,9 +27,9 @@ import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; import org.elasticsearch.common.lucene.docset.DocIdSets; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.object.ObjectMapper; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.SearchParseException; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; @@ -40,9 +40,11 @@ import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.NonCollectingAggregator; import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -52,10 +54,12 @@ public class ReverseNestedAggregator extends SingleBucketAggregator { private final BitDocIdSetFilter parentFilter; - public ReverseNestedAggregator(String name, AggregatorFactories factories, ObjectMapper objectMapper, 
AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - super(name, factories, aggregationContext, parent, metaData); + public ReverseNestedAggregator(String name, AggregatorFactories factories, ObjectMapper objectMapper, + AggregationContext aggregationContext, Aggregator parent, List reducers, Map metaData) + throws IOException { + super(name, factories, aggregationContext, parent, reducers, metaData); if (objectMapper == null) { - parentFilter = context.searchContext().bitsetFilterCache().getBitDocIdSetFilter(NonNestedDocsFilter.INSTANCE); + parentFilter = context.searchContext().bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter()); } else { parentFilter = context.searchContext().bitsetFilterCache().getBitDocIdSetFilter(objectMapper.nestedTypeFilter()); } @@ -105,12 +109,13 @@ public class ReverseNestedAggregator extends SingleBucketAggregator { @Override public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException { - return new InternalReverseNested(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), metaData()); + return new InternalReverseNested(name, bucketDocCount(owningBucketOrdinal), bucketAggregations(owningBucketOrdinal), reducers(), + metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalReverseNested(name, 0, buildEmptySubAggregations(), metaData()); + return new InternalReverseNested(name, 0, buildEmptySubAggregations(), reducers(), metaData()); } Filter getParentFilter() { @@ -127,22 +132,24 @@ public class ReverseNestedAggregator extends SingleBucketAggregator { } @Override - public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, + List reducers, Map metaData) throws IOException { // Early validation NestedAggregator closestNestedAggregator = findClosestNestedAggregator(parent); if (closestNestedAggregator == null) { - throw new SearchParseException(context.searchContext(), "Reverse nested aggregation [" + name + "] can only be used inside a [nested] aggregation"); + throw new SearchParseException(context.searchContext(), "Reverse nested aggregation [" + name + + "] can only be used inside a [nested] aggregation", null); } final ObjectMapper objectMapper; if (path != null) { MapperService.SmartNameObjectMapper mapper = context.searchContext().smartNameObjectMapper(path); if (mapper == null) { - return new Unmapped(name, context, parent, metaData); + return new Unmapped(name, context, parent, reducers, metaData); } objectMapper = mapper.mapper(); if (objectMapper == null) { - return new Unmapped(name, context, parent, metaData); + return new Unmapped(name, context, parent, reducers, metaData); } if (!objectMapper.nested().isNested()) { throw new AggregationExecutionException("[reverse_nested] nested path [" + path + "] is not nested"); @@ -150,18 +157,19 @@ public class ReverseNestedAggregator extends SingleBucketAggregator { } else { objectMapper = null; } - return new ReverseNestedAggregator(name, factories, objectMapper, context, parent, metaData); + return new ReverseNestedAggregator(name, factories, objectMapper, context, parent, reducers, metaData); } private final static class Unmapped extends NonCollectingAggregator { - public Unmapped(String name, AggregationContext context, Aggregator parent, Map 
metaData) throws IOException { - super(name, context, parent, metaData); + public Unmapped(String name, AggregationContext context, Aggregator parent, List reducers, Map metaData) + throws IOException { + super(name, context, parent, reducers, metaData); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalReverseNested(name, 0, buildEmptySubAggregations(), metaData()); + return new InternalReverseNested(name, 0, buildEmptySubAggregations(), reducers(), metaData()); } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedParser.java index 0ab7cefc9e3..80ab9f5eebd 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedParser.java @@ -49,10 +49,12 @@ public class ReverseNestedParser implements Aggregator.Parser { if ("path".equals(currentFieldName)) { path = parser.text(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java index 5f8b7baa2ac..db0ccee33e5 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; import org.elasticsearch.search.aggregations.bucket.BucketStreamContext; import org.elasticsearch.search.aggregations.bucket.BucketStreams; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; @@ -42,7 +43,8 @@ import java.util.Map; /** * */ -public class InternalRange extends InternalMultiBucketAggregation implements Range { +public class InternalRange> extends InternalMultiBucketAggregation + implements Range { static final Factory FACTORY = new Factory(); @@ -123,6 +125,14 @@ public class InternalRange extends InternalMulti return to; } + public boolean getKeyed() { + return keyed; + } + + public ValueFormatter getFormatter() { + return formatter; + } + @Override public String getFromAsString() { if (Double.isInfinite(from)) { @@ -215,31 +225,44 @@ public class InternalRange extends InternalMulti } } - public static class Factory> { + public static class Factory> { public String type() { return TYPE.name(); } - public R create(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, Map metaData) { - return (R) new InternalRange<>(name, ranges, formatter, keyed, metaData); + public R create(String name, List ranges, @Nullable 
ValueFormatter formatter, boolean keyed, List<Reducer> reducers,
+                Map<String, Object> metaData) {
+            return (R) new InternalRange<>(name, ranges, formatter, keyed, reducers, metaData);
         }
-
-        public B createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, boolean keyed, @Nullable ValueFormatter formatter) {
+        public B createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, boolean keyed,
+                @Nullable ValueFormatter formatter) {
             return (B) new Bucket(key, from, to, docCount, aggregations, keyed, formatter);
         }
+
+        public R create(List<B> ranges, R prototype) {
+            return (R) new InternalRange<>(prototype.name, ranges, prototype.formatter, prototype.keyed, prototype.reducers(),
+                    prototype.metaData);
+        }
+
+        public B createBucket(InternalAggregations aggregations, B prototype) {
+            return (B) new Bucket(prototype.getKey(), prototype.from, prototype.to, prototype.getDocCount(), aggregations, prototype.keyed,
+                    prototype.formatter);
+        }
     }
 
     private List<B> ranges;
     private Map<String, B> rangeMap;
-    private @Nullable ValueFormatter formatter;
-    private boolean keyed;
+    @Nullable
+    protected ValueFormatter formatter;
+    protected boolean keyed;
 
     public InternalRange() {} // for serialization
 
-    public InternalRange(String name, List<B> ranges, @Nullable ValueFormatter formatter, boolean keyed, Map<String, Object> metaData) {
-        super(name, metaData);
+    public InternalRange(String name, List<B> ranges, @Nullable ValueFormatter formatter, boolean keyed, List<Reducer> reducers,
+            Map<String, Object> metaData) {
+        super(name, reducers, metaData);
         this.ranges = ranges;
         this.formatter = formatter;
         this.keyed = keyed;
@@ -255,19 +278,29 @@ public class InternalRange extends InternalMulti
         return ranges;
     }
 
-    protected Factory getFactory() {
+    public Factory getFactory() {
         return FACTORY;
     }
 
     @Override
-    public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+    public R create(List<B> buckets) {
+        return getFactory().create(buckets, (R) this);
+    }
+
+    @Override
+    public B createBucket(InternalAggregations aggregations, B prototype) {
+        return getFactory().createBucket(aggregations, prototype);
+    }
+
+    @Override
+    public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
         @SuppressWarnings("unchecked")
         List<Bucket>[] rangeList = new List[ranges.size()];
         for (int i = 0; i < rangeList.length; ++i) {
             rangeList[i] = new ArrayList();
         }
         for (InternalAggregation aggregation : aggregations) {
-            InternalRange<B> ranges = (InternalRange<B>) aggregation;
+            InternalRange<B, R> ranges = (InternalRange<B, R>) aggregation;
             int i = 0;
             for (Bucket range : ranges.ranges) {
                 rangeList[i++].add(range);
             }
@@ -278,7 +311,7 @@ public class InternalRange extends InternalMulti
         for (int i = 0; i < this.ranges.size(); ++i) {
             ranges.add((B) rangeList[i].get(0).reduce(rangeList[i], reduceContext));
         }
-        return getFactory().create(name, ranges, formatter, keyed, getMetaData());
+        return getFactory().create(name, ranges, formatter, keyed, reducers(), getMetaData());
     }
 
     @Override
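The new create(List<B> ranges, R prototype) and createBucket(InternalAggregations, B prototype) overloads above give reducers a way to rebuild a finished range aggregation while substituting only its buckets (or only a bucket's sub-aggregations), copying name, formatter, keyed, reducers and metaData from the existing instance. A self-contained illustration of the idiom follows; RangeResult and RangeResultFactory are invented names, and only the shape of the two create(...) overloads mirrors InternalRange.Factory:

import java.util.List;

final class RangeResult {
    final String name;
    final boolean keyed;
    final List<String> buckets;

    RangeResult(String name, boolean keyed, List<String> buckets) {
        this.name = name;
        this.keyed = keyed;
        this.buckets = buckets;
    }
}

final class RangeResultFactory {
    // plain construction, as it existed before this patch
    RangeResult create(String name, boolean keyed, List<String> buckets) {
        return new RangeResult(name, keyed, buckets);
    }

    // prototype-style construction: copy every setting from an existing
    // instance and substitute only the bucket list, which is what a reducer
    // needs when it rewrites buckets after the final reduce
    RangeResult create(List<String> newBuckets, RangeResult prototype) {
        return new RangeResult(prototype.name, prototype.keyed, newBuckets);
    }
}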
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java
index 47011b8dc49..d6d961a5998 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java
@@ -33,6 +33,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.LeafBucketCollector;
 import org.elasticsearch.search.aggregations.NonCollectingAggregator;
 import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
@@ -104,10 +105,10 @@ public class RangeAggregator extends BucketsAggregator {
             List<Range> ranges,
             boolean keyed,
             AggregationContext aggregationContext,
-            Aggregator parent,
+            Aggregator parent, List<Reducer> reducers,
             Map<String, Object> metaData) throws IOException {
-        super(name, factories, aggregationContext, parent, metaData);
+        super(name, factories, aggregationContext, parent, reducers, metaData);
         assert valuesSource != null;
         this.valuesSource = valuesSource;
         this.formatter = format != null ? format.formatter() : null;
@@ -149,54 +150,54 @@ public class RangeAggregator extends BucketsAggregator {
                 }
             }
 
-        private int collect(int doc, double value, long owningBucketOrdinal, int lowBound) throws IOException {
-            int lo = lowBound, hi = ranges.length - 1; // all candidates are between these indexes
-            int mid = (lo + hi) >>> 1;
-            while (lo <= hi) {
-                if (value < ranges[mid].from) {
-                    hi = mid - 1;
-                } else if (value >= maxTo[mid]) {
-                    lo = mid + 1;
-                } else {
-                    break;
-                }
-                mid = (lo + hi) >>> 1;
-            }
-            if (lo > hi) return lo; // no potential candidate
-
-            // binary search the lower bound
-            int startLo = lo, startHi = mid;
-            while (startLo <= startHi) {
-                final int startMid = (startLo + startHi) >>> 1;
-                if (value >= maxTo[startMid]) {
-                    startLo = startMid + 1;
-                } else {
-                    startHi = startMid - 1;
-                }
-            }
-
-            // binary search the upper bound
-            int endLo = mid, endHi = hi;
-            while (endLo <= endHi) {
-                final int endMid = (endLo + endHi) >>> 1;
-                if (value < ranges[endMid].from) {
-                    endHi = endMid - 1;
-                } else {
-                    endLo = endMid + 1;
-                }
-            }
-
-            assert startLo == lowBound || value >= maxTo[startLo - 1];
-            assert endHi == ranges.length - 1 || value < ranges[endHi + 1].from;
-
-            for (int i = startLo; i <= endHi; ++i) {
-                if (ranges[i].matches(value)) {
-                    collectBucket(sub, doc, subBucketOrdinal(owningBucketOrdinal, i));
-                }
-            }
-
-            return endHi + 1;
-        }
+            private int collect(int doc, double value, long owningBucketOrdinal, int lowBound) throws IOException {
+                int lo = lowBound, hi = ranges.length - 1; // all candidates are between these indexes
+                int mid = (lo + hi) >>> 1;
+                while (lo <= hi) {
+                    if (value < ranges[mid].from) {
+                        hi = mid - 1;
+                    } else if (value >= maxTo[mid]) {
+                        lo = mid + 1;
+                    } else {
+                        break;
+                    }
+                    mid = (lo + hi) >>> 1;
+                }
+                if (lo > hi) return lo; // no potential candidate
+
+                // binary search the lower bound
+                int startLo = lo, startHi = mid;
+                while (startLo <= startHi) {
+                    final int startMid = (startLo + startHi) >>> 1;
+                    if (value >= maxTo[startMid]) {
+                        startLo = startMid + 1;
+                    } else {
+                        startHi = startMid - 1;
+                    }
+                }
+
+                // binary search the upper bound
+                int endLo = mid, endHi = hi;
+                while (endLo <= endHi) {
+                    final int endMid = (endLo + endHi) >>> 1;
+                    if (value < ranges[endMid].from) {
+                        endHi = endMid - 1;
+                    } else {
+                        endLo = endMid + 1;
+                    }
+                }
+
+                assert startLo == lowBound || value >= maxTo[startLo - 1];
+                assert endHi == ranges.length - 1 || value < ranges[endHi + 1].from;
+
+                for (int i = startLo; i <= endHi; ++i) {
+                    if (ranges[i].matches(value)) {
+                        collectBucket(sub, doc, subBucketOrdinal(owningBucketOrdinal, i));
+                    }
+                }
+
+                return endHi + 1;
+            }
         };
     }
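The collect(...) method above is only re-indented by this hunk, but its contract is worth spelling out: ranges are sorted by "from", maxTo[i] holds the running maximum of "to" over ranges[0..i], and a document's values are visited in ascending order, so each call can resume from the lowBound returned by the previous one. A standalone walk-through with hypothetical data:

// Hypothetical data, not from the changeset: three ranges sorted by "from".
double[][] ranges = { { 0, 10 }, { 5, 20 }, { 30, 40 } };
double[] maxTo = { 10, 20, 40 }; // maxTo[i] = max(maxTo[i-1], ranges[i].to)

// A document with the ascending values [6, 15]:
//  - collect(6, lowBound = 0) narrows the candidate window to indexes [0, 1],
//    collects the doc into [0, 10) and [5, 20), and returns endHi + 1 = 2.
//  - collect(15, lowBound = 2) finds no candidates, which is safe: every
//    range at index <= endHi already had from <= 6, so a later, larger value
//    either matched it before (the doc is already counted in that bucket and
//    must not be counted twice) or has passed its "to" and can never match.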
@@ -215,7 +216,7 @@ public class RangeAggregator extends BucketsAggregator {
             buckets.add(bucket);
         }
         // value source can be null in the case of unmapped fields
-        return rangeFactory.create(name, buckets, formatter, keyed, metaData());
+        return rangeFactory.create(name, buckets, formatter, keyed, reducers(), metaData());
     }
 
     @Override
@@ -229,7 +230,7 @@ public class RangeAggregator extends BucketsAggregator {
             buckets.add(bucket);
         }
         // value source can be null in the case of unmapped fields
-        return rangeFactory.create(name, buckets, formatter, keyed, metaData());
+        return rangeFactory.create(name, buckets, formatter, keyed, reducers(), metaData());
     }
 
     private static final void sortRanges(final Range[] ranges) {
@@ -266,10 +267,10 @@ public class RangeAggregator extends BucketsAggregator {
                 ValueFormat format,
                 AggregationContext context,
                 Aggregator parent,
-                InternalRange.Factory factory,
+                InternalRange.Factory factory, List<Reducer> reducers,
                 Map<String, Object> metaData) throws IOException {
-            super(name, context, parent, metaData);
+            super(name, context, parent, reducers, metaData);
             this.ranges = ranges;
             ValueParser parser = format != null ? format.parser() : ValueParser.RAW;
             for (Range range : this.ranges) {
@@ -287,7 +288,7 @@ public class RangeAggregator extends BucketsAggregator {
             for (RangeAggregator.Range range : ranges) {
                 buckets.add(factory.createBucket(range.key, range.from, range.to, 0, subAggs, keyed, formatter));
             }
-            return factory.create(name, buckets, formatter, keyed, metaData());
+            return factory.create(name, buckets, formatter, keyed, reducers(), metaData());
         }
     }
 
@@ -305,13 +306,15 @@ public class RangeAggregator extends BucketsAggregator {
     }
 
     @Override
-    protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData) throws IOException {
-        return new Unmapped(name, ranges, keyed, config.format(), aggregationContext, parent, rangeFactory, metaData);
+    protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List<Reducer> reducers,
+            Map<String, Object> metaData) throws IOException {
+        return new Unmapped(name, ranges, keyed, config.format(), aggregationContext, parent, rangeFactory, reducers, metaData);
     }
 
     @Override
-    protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException {
-        return new RangeAggregator(name, factories, valuesSource, config.format(), rangeFactory, ranges, keyed, aggregationContext, parent, metaData);
+    protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent,
+            boolean collectsFromSingleBucket, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+        return new RangeAggregator(name, factories, valuesSource, config.format(), rangeFactory, ranges, keyed, aggregationContext, parent, reducers, metaData);
     }
 }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java
index dbe05df0998..e30b84bf1de 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeParser.java
@@ -89,21 +89,25 @@ public class RangeParser implements Aggregator.Parser {
                         ranges.add(new RangeAggregator.Range(key, from, fromAsStr, to, toAsStr));
                     }
                 } else {
-                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" +
aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_BOOLEAN) { if ("keyed".equals(currentFieldName)) { keyed = parser.booleanValue(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } if (ranges == null) { - throw new SearchParseException(context, "Missing [ranges] in ranges aggregator [" + aggregationName + "]"); + throw new SearchParseException(context, "Missing [ranges] in ranges aggregator [" + aggregationName + "]", + parser.getTokenLocation()); } return new RangeAggregator.Factory(aggregationName, vsParser.config(), InternalRange.FACTORY, ranges, keyed); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeParser.java index 06dcba53b95..940e20a79a8 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/DateRangeParser.java @@ -79,7 +79,8 @@ public class DateRangeParser implements Aggregator.Parser { } else if ("to".equals(toOrFromOrKey)) { to = parser.doubleValue(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + + "]: [" + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_STRING) { if ("from".equals(toOrFromOrKey)) { @@ -89,7 +90,7 @@ public class DateRangeParser implements Aggregator.Parser { } else if ("key".equals(toOrFromOrKey)) { key = parser.text(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].", parser.getTokenLocation()); } } } @@ -100,15 +101,18 @@ public class DateRangeParser implements Aggregator.Parser { if ("keyed".equals(currentFieldName)) { keyed = parser.booleanValue(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } if (ranges == null) { - throw new SearchParseException(context, "Missing [ranges] in ranges 
aggregator [" + aggregationName + "]"); + throw new SearchParseException(context, "Missing [ranges] in ranges aggregator [" + aggregationName + "]", + parser.getTokenLocation()); } return new RangeAggregator.Factory(aggregationName, vsParser.config(), InternalDateRange.FACTORY, ranges, keyed); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java index 785df76e824..6444f53e527 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/date/InternalDateRange.java @@ -26,6 +26,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.BucketStreamContext; import org.elasticsearch.search.aggregations.bucket.BucketStreams; import org.elasticsearch.search.aggregations.bucket.range.InternalRange; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; @@ -37,7 +38,7 @@ import java.util.Map; /** * */ -public class InternalDateRange extends InternalRange { +public class InternalDateRange extends InternalRange { public final static Type TYPE = new Type("date_range", "drange"); @@ -112,7 +113,7 @@ public class InternalDateRange extends InternalRange { } } - private static class Factory extends InternalRange.Factory { + public static class Factory extends InternalRange.Factory { @Override public String type() { @@ -120,20 +121,34 @@ public class InternalDateRange extends InternalRange { } @Override - public InternalDateRange create(String name, List ranges, ValueFormatter formatter, boolean keyed, Map metaData) { - return new InternalDateRange(name, ranges, formatter, keyed, metaData); + public InternalDateRange create(String name, List ranges, ValueFormatter formatter, boolean keyed, + List reducers, Map metaData) { + return new InternalDateRange(name, ranges, formatter, keyed, reducers, metaData); + } + + @Override + public InternalDateRange create(List ranges, InternalDateRange prototype) { + return new InternalDateRange(prototype.name, ranges, prototype.formatter, prototype.keyed, prototype.reducers(), + prototype.metaData); } @Override public Bucket createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, boolean keyed, ValueFormatter formatter) { return new Bucket(key, from, to, docCount, aggregations, keyed, formatter); } + + @Override + public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { + return new Bucket(prototype.getKey(), ((Number) prototype.getFrom()).doubleValue(), ((Number) prototype.getTo()).doubleValue(), + prototype.getDocCount(), aggregations, prototype.getKeyed(), prototype.getFormatter()); + } } InternalDateRange() {} // for serialization - InternalDateRange(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, Map metaData) { - super(name, ranges, formatter, keyed, metaData); + InternalDateRange(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, + List reducers, Map metaData) { + super(name, ranges, formatter, keyed, reducers, metaData); } @Override @@ -142,7 +157,7 @@ public class InternalDateRange extends InternalRange { } @Override - protected InternalRange.Factory getFactory() { + 
public InternalRange.Factory getFactory() { return FACTORY; } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java index 713b94595f5..3eee3ceb704 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/GeoDistanceParser.java @@ -35,6 +35,7 @@ import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.bucket.range.InternalRange; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator; import org.elasticsearch.search.aggregations.bucket.range.RangeAggregator.Unmapped; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.GeoPointParser; import org.elasticsearch.search.aggregations.support.ValuesSource; @@ -98,13 +99,15 @@ public class GeoDistanceParser implements Aggregator.Parser { } else if ("distance_type".equals(currentFieldName) || "distanceType".equals(currentFieldName)) { distanceType = GeoDistance.fromString(parser.text()); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_BOOLEAN) { if ("keyed".equals(currentFieldName)) { keyed = parser.booleanValue(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_ARRAY) { if ("ranges".equals(currentFieldName)) { @@ -138,20 +141,24 @@ public class GeoDistanceParser implements Aggregator.Parser { ranges.add(new RangeAggregator.Range(key(key, from, to), from, fromAsStr, to, toAsStr)); } } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } if (ranges == null) { - throw new SearchParseException(context, "Missing [ranges] in geo_distance aggregator [" + aggregationName + "]"); + throw new SearchParseException(context, "Missing [ranges] in geo_distance aggregator [" + aggregationName + "]", + parser.getTokenLocation()); } GeoPoint origin = geoPointParser.geoPoint(); if (origin == null) { - throw new SearchParseException(context, "Missing [origin] in geo_distance aggregator [" + aggregationName + "]"); + throw new SearchParseException(context, "Missing [origin] in geo_distance aggregator [" + aggregationName + "]", + 
parser.getTokenLocation()); } return new GeoDistanceFactory(aggregationName, vsParser.config(), InternalGeoDistance.FACTORY, origin, unit, distanceType, ranges, keyed); @@ -179,14 +186,18 @@ public class GeoDistanceParser implements Aggregator.Parser { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new Unmapped(name, ranges, keyed, null, aggregationContext, parent, rangeFactory, metaData); + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new Unmapped(name, ranges, keyed, null, aggregationContext, parent, rangeFactory, reducers, metaData); } @Override - protected Aggregator doCreateInternal(final ValuesSource.GeoPoint valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + protected Aggregator doCreateInternal(final ValuesSource.GeoPoint valuesSource, AggregationContext aggregationContext, + Aggregator parent, boolean collectsFromSingleBucket, List reducers, Map metaData) + throws IOException { DistanceSource distanceSource = new DistanceSource(valuesSource, distanceType, origin, unit); - return new RangeAggregator(name, factories, distanceSource, null, rangeFactory, ranges, keyed, aggregationContext, parent, metaData); + return new RangeAggregator(name, factories, distanceSource, null, rangeFactory, ranges, keyed, aggregationContext, parent, + reducers, metaData); } private static class DistanceSource extends ValuesSource.Numeric { diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java index da2c41d5233..b271c3336e0 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/geodistance/InternalGeoDistance.java @@ -26,6 +26,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.BucketStreamContext; import org.elasticsearch.search.aggregations.bucket.BucketStreams; import org.elasticsearch.search.aggregations.bucket.range.InternalRange; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; @@ -35,7 +36,7 @@ import java.util.Map; /** * */ -public class InternalGeoDistance extends InternalRange { +public class InternalGeoDistance extends InternalRange { public static final Type TYPE = new Type("geo_distance", "gdist"); @@ -100,7 +101,7 @@ public class InternalGeoDistance extends InternalRange { + public static class Factory extends InternalRange.Factory { @Override public String type() { @@ -108,20 +109,34 @@ public class InternalGeoDistance extends InternalRange ranges, @Nullable ValueFormatter formatter, boolean keyed, Map metaData) { - return new InternalGeoDistance(name, ranges, formatter, keyed, metaData); + public InternalGeoDistance create(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, + List reducers, Map metaData) { + return new InternalGeoDistance(name, ranges, formatter, keyed, reducers, metaData); + } + + @Override + public InternalGeoDistance create(List ranges, InternalGeoDistance prototype) 
{ + return new InternalGeoDistance(prototype.name, ranges, prototype.formatter, prototype.keyed, prototype.reducers(), + prototype.metaData); } @Override public Bucket createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, boolean keyed, @Nullable ValueFormatter formatter) { return new Bucket(key, from, to, docCount, aggregations, keyed, formatter); } + + @Override + public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) { + return new Bucket(prototype.getKey(), ((Number) prototype.getFrom()).doubleValue(), ((Number) prototype.getTo()).doubleValue(), + prototype.getDocCount(), aggregations, prototype.getKeyed(), prototype.getFormatter()); + } } InternalGeoDistance() {} // for serialization - public InternalGeoDistance(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, Map metaData) { - super(name, ranges, formatter, keyed, metaData); + public InternalGeoDistance(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, List reducers, + Map metaData) { + super(name, ranges, formatter, keyed, reducers, metaData); } @Override @@ -130,7 +145,7 @@ public class InternalGeoDistance extends InternalRange getFactory() { + public InternalRange.Factory getFactory() { return FACTORY; } } \ No newline at end of file diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/InternalIPv4Range.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/InternalIPv4Range.java index 9b608aa42d4..96668e67c69 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/InternalIPv4Range.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/InternalIPv4Range.java @@ -26,6 +26,7 @@ import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.BucketStreamContext; import org.elasticsearch.search.aggregations.bucket.BucketStreams; import org.elasticsearch.search.aggregations.bucket.range.InternalRange; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; @@ -35,7 +36,7 @@ import java.util.Map; /** * */ -public class InternalIPv4Range extends InternalRange { +public class InternalIPv4Range extends InternalRange { public static final long MAX_IP = 4294967296l; @@ -109,7 +110,7 @@ public class InternalIPv4Range extends InternalRange { } } - private static class Factory extends InternalRange.Factory { + public static class Factory extends InternalRange.Factory { @Override public String type() { @@ -117,20 +118,33 @@ public class InternalIPv4Range extends InternalRange { } @Override - public InternalIPv4Range create(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, Map metaData) { - return new InternalIPv4Range(name, ranges, keyed, metaData); + public InternalIPv4Range create(String name, List ranges, @Nullable ValueFormatter formatter, boolean keyed, + List reducers, Map metaData) { + return new InternalIPv4Range(name, ranges, keyed, reducers, metaData); + } + + @Override + public InternalIPv4Range create(List ranges, InternalIPv4Range prototype) { + return new InternalIPv4Range(prototype.name, ranges, prototype.keyed, prototype.reducers(), prototype.metaData); } @Override public Bucket createBucket(String key, double from, double to, long docCount, InternalAggregations aggregations, boolean keyed, @Nullable 
ValueFormatter formatter) {
             return new Bucket(key, from, to, docCount, aggregations, keyed);
         }
+
+        @Override
+        public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
+            return new Bucket(prototype.getKey(), ((Number) prototype.getFrom()).doubleValue(), ((Number) prototype.getTo()).doubleValue(),
+                    prototype.getDocCount(), aggregations, prototype.getKeyed());
+        }
     }
 
     public InternalIPv4Range() {} // for serialization
 
-    public InternalIPv4Range(String name, List<Bucket> ranges, boolean keyed, Map<String, Object> metaData) {
-        super(name, ranges, ValueFormatter.IPv4, keyed, metaData);
+    public InternalIPv4Range(String name, List<Bucket> ranges, boolean keyed, List<Reducer> reducers,
+            Map<String, Object> metaData) {
+        super(name, ranges, ValueFormatter.IPv4, keyed, reducers, metaData);
     }
 
     @Override
@@ -139,7 +153,7 @@ public class InternalIPv4Range extends InternalRange {
     }
 
     @Override
-    protected InternalRange.Factory getFactory() {
+    public InternalRange.Factory getFactory() {
         return FACTORY;
     }
 }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IpRangeParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IpRangeParser.java
index 49c9c90b16e..37891f6f239 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IpRangeParser.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/range/ipv4/IpRangeParser.java
@@ -99,21 +99,25 @@ public class IpRangeParser implements Aggregator.Parser {
                         ranges.add(range);
                     }
                 } else {
-                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" +
+                            currentFieldName + "].", parser.getTokenLocation());
                 }
             } else if (token == XContentParser.Token.VALUE_BOOLEAN) {
                 if ("keyed".equals(currentFieldName)) {
                     keyed = parser.booleanValue();
                 } else {
-                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" +
+                            currentFieldName + "].", parser.getTokenLocation());
                 }
             } else {
-                throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+                throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].",
+                        parser.getTokenLocation());
            }
         }
 
         if (ranges == null) {
-            throw new SearchParseException(context, "Missing [ranges] in ranges aggregator [" + aggregationName + "]");
+            throw new SearchParseException(context, "Missing [ranges] in ranges aggregator [" + aggregationName + "]",
+                    parser.getTokenLocation());
         }
 
         return new RangeAggregator.Factory(aggregationName, vsParser.config(), InternalIPv4Range.FACTORY, ranges, keyed);
@@ -122,7 +126,8 @@ public class IpRangeParser implements Aggregator.Parser {
     private static void parseMaskRange(String cidr, RangeAggregator.Range range, String aggregationName, SearchContext ctx) {
         long[] fromTo = IPv4RangeBuilder.cidrMaskToMinMax(cidr);
         if (fromTo == null) {
-            throw new SearchParseException(ctx, "invalid CIDR mask [" + cidr + "] in aggregation [" + aggregationName + "]");
+            throw new SearchParseException(ctx, "invalid CIDR mask [" + cidr + "] in aggregation [" + aggregationName + "]",
+                    null);
         }
         range.from = fromTo[0] < 0 ? Double.NEGATIVE_INFINITY : fromTo[0];
         range.to = fromTo[1] < 0 ? Double.POSITIVE_INFINITY : fromTo[1];
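parseMaskRange() above leans on the fact that a CIDR mask selects a contiguous block of 2^(32 - prefix) addresses, so it maps onto a flat numeric range [from, to) over the unsigned 32-bit address space, with negative sentinels from cidrMaskToMinMax translated into open-ended infinities. A standalone sketch of that conversion (this is not the IPv4RangeBuilder implementation, and it omits the validation and sentinel handling the real method performs):

final class CidrSketch {
    // cidrToMinMax("10.0.0.0/8") -> { 167772160, 184549376 }: a block of 2^24 addresses
    static long[] cidrToMinMax(String cidr) {
        String[] parts = cidr.split("/");             // address and prefix length
        String[] octets = parts[0].split("\\.");
        int prefix = Integer.parseInt(parts[1]);
        long ip = 0;
        for (String octet : octets) {
            ip = (ip << 8) | Integer.parseInt(octet); // pack the four octets into a long
        }
        long blockSize = 1L << (32 - prefix);         // addresses covered by the mask
        long from = ip & ~(blockSize - 1);            // clear the host bits
        return new long[] { from, from + blockSize }; // half-open range [from, to)
    }
}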
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedBytesHashSamplerAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedBytesHashSamplerAggregator.java
index c74df049d12..5487e200fac 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedBytesHashSamplerAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedBytesHashSamplerAggregator.java
@@ -26,16 +26,17 @@ import org.apache.lucene.search.DiversifiedTopDocsCollector.ScoreDocKey;
 import org.apache.lucene.search.TopDocsCollector;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.bucket.BestDocsDeferringCollector;
 import org.elasticsearch.search.aggregations.bucket.DeferringBucketCollector;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -48,9 +49,10 @@ public class DiversifiedBytesHashSamplerAggregator extends SamplerAggregator {
     private int maxDocsPerValue;
 
     public DiversifiedBytesHashSamplerAggregator(String name, int shardSize, AggregatorFactories factories,
-            AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData, ValuesSource valuesSource,
+            AggregationContext aggregationContext, Aggregator parent, List<Reducer> reducers, Map<String, Object> metaData,
+            ValuesSource valuesSource,
             int maxDocsPerValue) throws IOException {
-        super(name, shardSize, factories, aggregationContext, parent, metaData);
+        super(name, shardSize, factories, aggregationContext, parent, reducers, metaData);
         this.valuesSource = valuesSource;
         this.maxDocsPerValue = maxDocsPerValue;
     }
@@ -103,7 +105,7 @@ public class DiversifiedBytesHashSamplerAggregator extends SamplerAggregator {
             values.setDocument(doc);
             final int valuesCount = values.count();
             if (valuesCount > 1) {
-                throw new ElasticsearchIllegalArgumentException("Sample diversifying key must be a single valued-field");
+                throw new IllegalArgumentException("Sample diversifying key must be a single valued-field");
             }
             if (valuesCount == 1) {
                 final BytesRef bytes = values.valueAt(0);
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedMapSamplerAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedMapSamplerAggregator.java
index bf196245ce1..bb34ea1c1a8 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedMapSamplerAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedMapSamplerAggregator.java
@@ -26,7 +26,6 @@ import org.apache.lucene.search.DiversifiedTopDocsCollector.ScoreDocKey;
 import org.apache.lucene.search.TopDocsCollector;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.util.BytesRefHash;
 import
org.elasticsearch.index.fielddata.SortedBinaryDocValues; @@ -34,10 +33,12 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.BestDocsDeferringCollector; import org.elasticsearch.search.aggregations.bucket.DeferringBucketCollector; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import java.io.IOException; +import java.util.List; import java.util.Map; public class DiversifiedMapSamplerAggregator extends SamplerAggregator { @@ -47,9 +48,9 @@ public class DiversifiedMapSamplerAggregator extends SamplerAggregator { private BytesRefHash bucketOrds; public DiversifiedMapSamplerAggregator(String name, int shardSize, AggregatorFactories factories, - AggregationContext aggregationContext, Aggregator parent, Map metaData, ValuesSource valuesSource, - int maxDocsPerValue) throws IOException { - super(name, shardSize, factories, aggregationContext, parent, metaData); + AggregationContext aggregationContext, Aggregator parent, List reducers, Map metaData, + ValuesSource valuesSource, int maxDocsPerValue) throws IOException { + super(name, shardSize, factories, aggregationContext, parent, reducers, metaData); this.valuesSource = valuesSource; this.maxDocsPerValue = maxDocsPerValue; bucketOrds = new BytesRefHash(shardSize, aggregationContext.bigArrays()); @@ -110,7 +111,7 @@ public class DiversifiedMapSamplerAggregator extends SamplerAggregator { values.setDocument(doc); final int valuesCount = values.count(); if (valuesCount > 1) { - throw new ElasticsearchIllegalArgumentException("Sample diversifying key must be a single valued-field"); + throw new IllegalArgumentException("Sample diversifying key must be a single valued-field"); } if (valuesCount == 1) { final BytesRef bytes = values.valueAt(0); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedNumericSamplerAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedNumericSamplerAggregator.java index e5f963ed3ef..4947dff39e5 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedNumericSamplerAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedNumericSamplerAggregator.java @@ -26,15 +26,16 @@ import org.apache.lucene.search.DiversifiedTopDocsCollector; import org.apache.lucene.search.DiversifiedTopDocsCollector.ScoreDocKey; import org.apache.lucene.search.TopDocsCollector; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.BestDocsDeferringCollector; import org.elasticsearch.search.aggregations.bucket.DeferringBucketCollector; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import java.io.IOException; +import java.util.List; import java.util.Map; public class DiversifiedNumericSamplerAggregator extends SamplerAggregator { @@ -43,9 +44,9 @@ public class DiversifiedNumericSamplerAggregator extends SamplerAggregator { private int 
maxDocsPerValue; public DiversifiedNumericSamplerAggregator(String name, int shardSize, AggregatorFactories factories, - AggregationContext aggregationContext, Aggregator parent, Map metaData, ValuesSource.Numeric valuesSource, - int maxDocsPerValue) throws IOException { - super(name, shardSize, factories, aggregationContext, parent, metaData); + AggregationContext aggregationContext, Aggregator parent, List reducers, Map metaData, + ValuesSource.Numeric valuesSource, int maxDocsPerValue) throws IOException { + super(name, shardSize, factories, aggregationContext, parent, reducers, metaData); this.valuesSource = valuesSource; this.maxDocsPerValue = maxDocsPerValue; } @@ -94,7 +95,7 @@ public class DiversifiedNumericSamplerAggregator extends SamplerAggregator { values.setDocument(doc); final int valuesCount = values.count(); if (valuesCount > 1) { - throw new ElasticsearchIllegalArgumentException("Sample diversifying key must be a single valued-field"); + throw new IllegalArgumentException("Sample diversifying key must be a single valued-field"); } if (valuesCount == 1) { return values.valueAt(0); diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedOrdinalsSamplerAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedOrdinalsSamplerAggregator.java index 808acc49883..87f1d5d8ab2 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedOrdinalsSamplerAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedOrdinalsSamplerAggregator.java @@ -27,15 +27,16 @@ import org.apache.lucene.index.SortedDocValues; import org.apache.lucene.search.DiversifiedTopDocsCollector; import org.apache.lucene.search.DiversifiedTopDocsCollector.ScoreDocKey; import org.apache.lucene.search.TopDocsCollector; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.BestDocsDeferringCollector; import org.elasticsearch.search.aggregations.bucket.DeferringBucketCollector; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import java.io.IOException; +import java.util.List; import java.util.Map; public class DiversifiedOrdinalsSamplerAggregator extends SamplerAggregator { @@ -44,9 +45,9 @@ public class DiversifiedOrdinalsSamplerAggregator extends SamplerAggregator { private int maxDocsPerValue; public DiversifiedOrdinalsSamplerAggregator(String name, int shardSize, AggregatorFactories factories, - AggregationContext aggregationContext, Aggregator parent, Map metaData, + AggregationContext aggregationContext, Aggregator parent, List reducers, Map metaData, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, int maxDocsPerValue) throws IOException { - super(name, shardSize, factories, aggregationContext, parent, metaData); + super(name, shardSize, factories, aggregationContext, parent, reducers, metaData); this.valuesSource = valuesSource; this.maxDocsPerValue = maxDocsPerValue; } @@ -100,7 +101,7 @@ public class DiversifiedOrdinalsSamplerAggregator extends SamplerAggregator { globalOrds.setDocument(doc); final long valuesCount = globalOrds.cardinality(); if (valuesCount > 1) { - throw new ElasticsearchIllegalArgumentException("Sample 
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSampler.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSampler.java
index 509c89e3ccc..ce24c60a3e4 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSampler.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/InternalSampler.java
@@ -22,8 +22,10 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.bucket.InternalSingleBucketAggregation;
+import org.elasticsearch.search.aggregations.reducers.Reducer;

 import java.io.IOException;
+import java.util.List;
 import java.util.Map;

 /**
@@ -49,8 +51,8 @@ public class InternalSampler extends InternalSingleBucketAggregation implements
     InternalSampler() {
     } // for serialization

-    InternalSampler(String name, long docCount, InternalAggregations subAggregations, Map<String, Object> metaData) {
-        super(name, docCount, subAggregations, metaData);
+    InternalSampler(String name, long docCount, InternalAggregations subAggregations, List<Reducer> reducers, Map<String, Object> metaData) {
+        super(name, docCount, subAggregations, reducers, metaData);
     }

     @Override
@@ -59,7 +61,8 @@ public class InternalSampler extends InternalSingleBucketAggregation implements
     }

     @Override
-    protected InternalSingleBucketAggregation newAggregation(String name, long docCount, InternalAggregations subAggregations) {
-        return new InternalSampler(name, docCount, subAggregations, metaData);
+    protected InternalSingleBucketAggregation newAggregation(String name, long docCount,
+            InternalAggregations subAggregations) {
+        return new InternalSampler(name, docCount, subAggregations, reducers(), metaData);
     }
 }
\ No newline at end of file
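Every sampler class above receives the same treatment: the aggregator constructor gains a List<Reducer> parameter, forwards it up the super chain, and the stored list is later read back through reducers() when the shard-level InternalAggregation is built. A condensed sketch of that threading, using hypothetical stand-ins for the Elasticsearch base classes:

    import java.util.List;
    import java.util.Map;

    // Hypothetical miniature of the pattern; Reducer is simplified to Object here.
    class InternalAggregationSketch {
        final List<Object> reducers;
        final Map<String, Object> metaData;

        InternalAggregationSketch(String name, List<Object> reducers, Map<String, Object> metaData) {
            this.reducers = reducers; // carried on the result, next to metaData
            this.metaData = metaData;
        }
    }

    abstract class AggregatorSketch {
        private final List<Object> reducers;

        AggregatorSketch(List<Object> reducers) {
            this.reducers = reducers; // accepted once, in the base constructor
        }

        protected List<Object> reducers() {
            return reducers; // read back by buildAggregation()/buildEmptyAggregation()
        }

        InternalAggregationSketch buildAggregation(String name, Map<String, Object> metaData) {
            return new InternalAggregationSketch(name, reducers(), metaData);
        }
    }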
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java
index 27bfc8666c5..a3b7137975a 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregator.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.search.aggregations.bucket.sampler;

 import org.apache.lucene.index.LeafReaderContext;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.search.aggregations.AggregationExecutionException;
 import org.elasticsearch.search.aggregations.Aggregator;
@@ -31,6 +30,7 @@ import org.elasticsearch.search.aggregations.NonCollectingAggregator;
 import org.elasticsearch.search.aggregations.bucket.BestDocsDeferringCollector;
 import org.elasticsearch.search.aggregations.bucket.DeferringBucketCollector;
 import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
@@ -38,6 +38,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
 import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;

 import java.io.IOException;
+import java.util.List;
 import java.util.Map;

 /**
@@ -59,9 +60,9 @@ public class SamplerAggregator extends SingleBucketAggregator {

         @Override
         Aggregator create(String name, AggregatorFactories factories, int shardSize, int maxDocsPerValue, ValuesSource valuesSource,
-                AggregationContext context, Aggregator parent, Map<String, Object> metaData) throws IOException {
+                AggregationContext context, Aggregator parent, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {

-            return new DiversifiedMapSamplerAggregator(name, shardSize, factories, context, parent, metaData, valuesSource,
+            return new DiversifiedMapSamplerAggregator(name, shardSize, factories, context, parent, reducers, metaData, valuesSource,
                     maxDocsPerValue);
         }
@@ -75,9 +76,10 @@ public class SamplerAggregator extends SingleBucketAggregator {

         @Override
         Aggregator create(String name, AggregatorFactories factories, int shardSize, int maxDocsPerValue, ValuesSource valuesSource,
-                AggregationContext context, Aggregator parent, Map<String, Object> metaData) throws IOException {
+                AggregationContext context, Aggregator parent, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {

-            return new DiversifiedBytesHashSamplerAggregator(name, shardSize, factories, context, parent, metaData, valuesSource,
+            return new DiversifiedBytesHashSamplerAggregator(name, shardSize, factories, context, parent, reducers, metaData,
+                    valuesSource,
                     maxDocsPerValue);
         }
@@ -91,8 +93,8 @@ public class SamplerAggregator extends SingleBucketAggregator {

         @Override
         Aggregator create(String name, AggregatorFactories factories, int shardSize, int maxDocsPerValue, ValuesSource valuesSource,
-                AggregationContext context, Aggregator parent, Map<String, Object> metaData) throws IOException {
-            return new DiversifiedOrdinalsSamplerAggregator(name, shardSize, factories, context, parent, metaData,
+                AggregationContext context, Aggregator parent, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+            return new DiversifiedOrdinalsSamplerAggregator(name, shardSize, factories, context, parent, reducers, metaData,
                     (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, maxDocsPerValue);
         }
@@ -109,7 +111,7 @@ public class SamplerAggregator extends SingleBucketAggregator {
                     return mode;
                 }
             }
-            throw new ElasticsearchIllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of " + values());
+            throw new IllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of " + values());
         }

         private final ParseField parseField;
@@ -119,7 +121,8 @@ public class SamplerAggregator extends SingleBucketAggregator {
         }

         abstract Aggregator create(String name, AggregatorFactories factories, int shardSize, int maxDocsPerValue, ValuesSource valuesSource,
-                AggregationContext context, Aggregator parent, Map<String, Object> metaData) throws IOException;
+                AggregationContext context, Aggregator parent, List<Reducer> reducers,
+                Map<String, Object> metaData) throws IOException;

         abstract boolean needsGlobalOrdinals();
@@ -133,9 +136,9 @@ public class SamplerAggregator extends SingleBucketAggregator {
     protected final int shardSize;
     protected BestDocsDeferringCollector bdd;

-    public SamplerAggregator(String name, int shardSize, AggregatorFactories factories,
-            AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData) throws IOException {
-        super(name, factories, aggregationContext, parent, metaData);
+    public SamplerAggregator(String name, int shardSize, AggregatorFactories factories, AggregationContext aggregationContext,
+            Aggregator parent, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+        super(name, factories, aggregationContext, parent, reducers, metaData);
         this.shardSize = shardSize;
     }
@@ -160,12 +163,13 @@ public class SamplerAggregator extends SingleBucketAggregator {
     @Override
     public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOException {
         runDeferredCollections(owningBucketOrdinal);
-        return new InternalSampler(name, bdd == null ? 0 : bdd.getDocCount(), bucketAggregations(owningBucketOrdinal), metaData());
+        return new InternalSampler(name, bdd == null ? 0 : bdd.getDocCount(), bucketAggregations(owningBucketOrdinal), reducers(),
+                metaData());
     }

     @Override
     public InternalAggregation buildEmptyAggregation() {
-        return new InternalSampler(name, 0, buildEmptySubAggregations(), metaData());
+        return new InternalSampler(name, 0, buildEmptySubAggregations(), reducers(), metaData());
     }

     public static class Factory extends AggregatorFactory {
@@ -179,12 +183,12 @@ public class SamplerAggregator extends SingleBucketAggregator {

         @Override
         public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket,
-                Map<String, Object> metaData) throws IOException {
+                List<Reducer> reducers, Map<String, Object> metaData) throws IOException {

             if (collectsFromSingleBucket == false) {
                 return asMultiBucketAggregator(this, context, parent);
             }
-            return new SamplerAggregator(name, shardSize, factories, context, parent, metaData);
+            return new SamplerAggregator(name, shardSize, factories, context, parent, reducers, metaData);
         }

     }
@@ -204,7 +208,7 @@ public class SamplerAggregator extends SingleBucketAggregator {

         @Override
         protected Aggregator doCreateInternal(ValuesSource valuesSource, AggregationContext context, Aggregator parent,
-                boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException {
+                boolean collectsFromSingleBucket, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {

             if (collectsFromSingleBucket == false) {
                 return asMultiBucketAggregator(this, context, parent);
@@ -212,7 +216,7 @@ public class SamplerAggregator extends SingleBucketAggregator {

             if (valuesSource instanceof ValuesSource.Numeric) {
-                return new DiversifiedNumericSamplerAggregator(name, shardSize, factories, context, parent, metaData,
+                return new DiversifiedNumericSamplerAggregator(name, shardSize, factories, context, parent, reducers, metaData,
                         (Numeric) valuesSource, maxDocsPerValue);
             }
@@ -230,7 +234,7 @@ public class SamplerAggregator extends SingleBucketAggregator {
                 if ((execution.needsGlobalOrdinals()) && (!(valuesSource instanceof ValuesSource.Bytes.WithOrdinals))) {
                     execution = ExecutionMode.MAP;
                 }
-                return execution.create(name, factories, shardSize, maxDocsPerValue, valuesSource, context, parent, metaData);
+                return execution.create(name, factories, shardSize, maxDocsPerValue, valuesSource, context, parent, reducers, metaData);
             }

             throw new AggregationExecutionException("Sampler aggregation cannot be applied to field [" + config.fieldContext().field() +
@@ -238,11 +242,11 @@ public class SamplerAggregator extends SingleBucketAggregator {
         }

         @Override
-        protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData)
-                throws IOException {
-            final UnmappedSampler aggregation = new UnmappedSampler(name, metaData);
+        protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List<Reducer> reducers,
+                Map<String, Object> metaData) throws IOException {
+            final UnmappedSampler aggregation = new UnmappedSampler(name, reducers, metaData);

-            return new NonCollectingAggregator(name, aggregationContext, parent, factories, metaData) {
+            return new NonCollectingAggregator(name, aggregationContext, parent, factories, reducers, metaData) {
                 @Override
                 public InternalAggregation buildEmptyAggregation() {
                     return aggregation;
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerParser.java
index 35a2963187e..d82dd2c6721 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerParser.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerParser.java
@@ -73,17 +73,18 @@ public class SamplerParser implements Aggregator.Parser {
                     maxDocsPerValue = parser.intValue();
                 } else {
                     throw new SearchParseException(context, "Unsupported property \"" + currentFieldName + "\" for aggregation \""
-                            + aggregationName);
+                            + aggregationName, parser.getTokenLocation());
                 }
             } else if (!vsParser.token(currentFieldName, token, parser)) {
                 if (EXECUTION_HINT_FIELD.match(currentFieldName)) {
                     executionHint = parser.text();
                 } else {
-                    throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+                    throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].",
+                            parser.getTokenLocation());
                 }
             } else {
                 throw new SearchParseException(context, "Unsupported property \"" + currentFieldName + "\" for aggregation \""
-                        + aggregationName);
+                        + aggregationName, parser.getTokenLocation());
             }
         }

@@ -93,7 +94,8 @@ public class SamplerParser implements Aggregator.Parser {
         } else {
             if (diversityChoiceMade) {
                 throw new SearchParseException(context, "Sampler aggregation has " + MAX_DOCS_PER_VALUE_FIELD.getPreferredName()
-                        + " setting but no \"field\" or \"script\" setting to provide values for aggregation \"" + aggregationName + "\"");
+                        + " setting but no \"field\" or \"script\" setting to provide values for aggregation \"" + aggregationName + "\"",
+                        parser.getTokenLocation());
             }

             return new SamplerAggregator.Factory(aggregationName, shardSize);
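All of SamplerParser's failure paths now pass parser.getTokenLocation() into SearchParseException, so a malformed request can be reported with the position of the offending token instead of a bare message. A rough sketch of the idea with invented minimal types:

    // Hypothetical miniature: an exception that carries a source location,
    // as SearchParseException does after this change.
    class TokenLocationSketch {
        final int line;
        final int column;

        TokenLocationSketch(int line, int column) {
            this.line = line;
            this.column = column;
        }
    }

    class ParseExceptionSketch extends RuntimeException {
        ParseExceptionSketch(String message, TokenLocationSketch where) {
            // The location travels with the error, so the REST layer can point
            // at the exact spot in the request body.
            super(message + " at line " + where.line + ", column " + where.column);
        }
    }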
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/UnmappedSampler.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/UnmappedSampler.java
index 95f8c7bfe78..5b019ee8bfb 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/UnmappedSampler.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/UnmappedSampler.java
@@ -23,6 +23,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.reducers.Reducer;

 import java.io.IOException;
 import java.util.List;
@@ -52,8 +53,8 @@ public class UnmappedSampler extends InternalSampler {
     UnmappedSampler() {
     }

-    public UnmappedSampler(String name, Map<String, Object> metaData) {
-        super(name, 0, InternalAggregations.EMPTY, metaData);
+    public UnmappedSampler(String name, List<Reducer> reducers, Map<String, Object> metaData) {
+        super(name, 0, InternalAggregations.EMPTY, reducers, metaData);
     }

     @Override
@@ -62,7 +63,7 @@ public class UnmappedSampler extends InternalSampler {
     }

     @Override
-    public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+    public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
         for (InternalAggregation agg : aggregations) {
             if (!(agg instanceof UnmappedSampler)) {
                 return agg.reduce(aggregations, reduceContext);
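UnmappedSampler, like the other internal aggregations in this patch, renames its reduce override to doReduce. The shape is a template method: the base class keeps the public reduce entry point for shared work (in this branch, running the attached reducers over the merged result) and delegates the type-specific merging to the doReduce hook. A simplified sketch of that contract, with invented types:

    import java.util.List;

    // Hypothetical skeleton of the reduce/doReduce split.
    abstract class InternalAggregationBaseSketch {

        // Shared entry point: subclasses no longer override this.
        public final InternalAggregationBaseSketch reduce(List<InternalAggregationBaseSketch> aggregations) {
            InternalAggregationBaseSketch reduced = doReduce(aggregations);
            // ...the stored reducers would be applied to 'reduced' here...
            return reduced;
        }

        // Type-specific merging, e.g. UnmappedSampler returning the first mapped result.
        protected abstract InternalAggregationBaseSketch doReduce(List<InternalAggregationBaseSketch> aggregations);
    }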
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java
index 49a7e56eefb..492167f1735 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/GlobalOrdinalsSignificantTermsAggregator.java
@@ -25,10 +25,11 @@ import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.util.LongHash;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
-import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
 import org.elasticsearch.search.aggregations.LeafBucketCollector;
+import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
 import org.elasticsearch.search.aggregations.bucket.terms.GlobalOrdinalsStringTermsAggregator;
 import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.internal.ContextIndexSearcher;
@@ -36,6 +37,7 @@ import org.elasticsearch.search.internal.ContextIndexSearcher;
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;

 /**
@@ -46,12 +48,13 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStringTermsAggregator {
     protected long numCollectedDocs;
     protected final SignificantTermsAggregatorFactory termsAggFactory;

-    public GlobalOrdinalsSignificantTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource,
-            BucketCountThresholds bucketCountThresholds,
-            IncludeExclude.OrdinalsFilter includeExclude, AggregationContext aggregationContext, Aggregator parent,
-            SignificantTermsAggregatorFactory termsAggFactory, Map<String, Object> metaData) throws IOException {
+    public GlobalOrdinalsSignificantTermsAggregator(String name, AggregatorFactories factories,
+            ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, BucketCountThresholds bucketCountThresholds,
+            IncludeExclude.OrdinalsFilter includeExclude, AggregationContext aggregationContext, Aggregator parent,
+            SignificantTermsAggregatorFactory termsAggFactory, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {

-        super(name, factories, valuesSource, null, bucketCountThresholds, includeExclude, aggregationContext, parent, SubAggCollectionMode.DEPTH_FIRST, false, metaData);
+        super(name, factories, valuesSource, null, bucketCountThresholds, includeExclude, aggregationContext, parent,
+                SubAggCollectionMode.DEPTH_FIRST, false, reducers, metaData);
         this.termsAggFactory = termsAggFactory;
     }
@@ -124,7 +127,9 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStringTermsAggregator {
             list[i] = bucket;
         }

-        return new SignificantStringTerms(subsetSize, supersetSize, name, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Arrays.asList(list), metaData());
+        return new SignificantStringTerms(subsetSize, supersetSize, name, bucketCountThresholds.getRequiredSize(),
+                bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Arrays.asList(list), reducers(),
+                metaData());
     }

     @Override
@@ -133,7 +138,9 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStringTermsAggregator {
         ContextIndexSearcher searcher = context.searchContext().searcher();
         IndexReader topReader = searcher.getIndexReader();
         int supersetSize = topReader.numDocs();
-        return new SignificantStringTerms(0, supersetSize, name, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Collections.emptyList(), metaData());
+        return new SignificantStringTerms(0, supersetSize, name, bucketCountThresholds.getRequiredSize(),
+                bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(),
+                Collections.<InternalSignificantTerms.Bucket> emptyList(), reducers(), metaData());
     }

     @Override
@@ -145,8 +152,8 @@ public class GlobalOrdinalsSignificantTermsAggregator extends GlobalOrdinalsStringTermsAggregator {

         private final LongHash bucketOrds;

-        public WithHash(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, BucketCountThresholds bucketCountThresholds, IncludeExclude.OrdinalsFilter includeExclude, AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggFactory, Map<String, Object> metaData) throws IOException {
-            super(name, factories, valuesSource, bucketCountThresholds, includeExclude, aggregationContext, parent, termsAggFactory, metaData);
+        public WithHash(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, BucketCountThresholds bucketCountThresholds, IncludeExclude.OrdinalsFilter includeExclude, AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggFactory, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+            super(name, factories, valuesSource, bucketCountThresholds, includeExclude, aggregationContext, parent, termsAggFactory, reducers, metaData);
             bucketOrds = new LongHash(1, aggregationContext.bigArrays());
         }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java
index 6ea57b606a9..825bb637fef 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/InternalSignificantTerms.java
@@ -27,6 +27,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
 import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic;
+import org.elasticsearch.search.aggregations.reducers.Reducer;

 import java.util.ArrayList;
 import java.util.Arrays;
@@ -38,12 +39,13 @@ import java.util.Map;
 /**
  *
  */
-public abstract class InternalSignificantTerms extends InternalMultiBucketAggregation implements SignificantTerms, ToXContent, Streamable {
+public abstract class InternalSignificantTerms<A extends InternalSignificantTerms, B extends InternalSignificantTerms.Bucket> extends
+        InternalMultiBucketAggregation<A, B> implements SignificantTerms, ToXContent, Streamable {

     protected SignificanceHeuristic significanceHeuristic;
     protected int requiredSize;
     protected long minDocCount;
-    protected List<Bucket> buckets;
+    protected List<? extends Bucket> buckets;
     protected Map<String, Bucket> bucketMap;
     protected long subsetSize;
     protected long supersetSize;
@@ -122,8 +124,10 @@ public abstract class InternalSignificantTerms extends InternalMultiBucketAggregation
         }
     }

-    protected InternalSignificantTerms(long subsetSize, long supersetSize, String name, int requiredSize, long minDocCount, SignificanceHeuristic significanceHeuristic, List<Bucket> buckets, Map<String, Object> metaData) {
-        super(name, metaData);
+    protected InternalSignificantTerms(long subsetSize, long supersetSize, String name, int requiredSize, long minDocCount,
+            SignificanceHeuristic significanceHeuristic, List<? extends Bucket> buckets, List<Reducer> reducers,
+            Map<String, Object> metaData) {
+        super(name, reducers, metaData);
         this.requiredSize = requiredSize;
         this.minDocCount = minDocCount;
         this.buckets = buckets;
@@ -156,20 +160,20 @@ public abstract class InternalSignificantTerms extends InternalMultiBucketAggregation
     }

     @Override
-    public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+    public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
         long globalSubsetSize = 0;
         long globalSupersetSize = 0;
         // Compute the overall result set size and the corpus size using the
         // top-level Aggregations from each shard
         for (InternalAggregation aggregation : aggregations) {
-            InternalSignificantTerms terms = (InternalSignificantTerms) aggregation;
+            InternalSignificantTerms<A, B> terms = (InternalSignificantTerms<A, B>) aggregation;
             globalSubsetSize += terms.subsetSize;
             globalSupersetSize += terms.supersetSize;
         }
         Map<String, List<Bucket>> buckets = new HashMap<>();
         for (InternalAggregation aggregation : aggregations) {
-            InternalSignificantTerms terms = (InternalSignificantTerms) aggregation;
+            InternalSignificantTerms<A, B> terms = (InternalSignificantTerms<A, B>) aggregation;
             for (Bucket bucket : terms.buckets) {
                 List<Bucket> existingBuckets = buckets.get(bucket.getKey());
                 if (existingBuckets == null) {
@@ -197,9 +201,10 @@ public abstract class InternalSignificantTerms extends InternalMultiBucketAggregation
         for (int i = ordered.size() - 1; i >= 0; i--) {
             list[i] = (Bucket) ordered.pop();
         }
-        return newAggregation(globalSubsetSize, globalSupersetSize, Arrays.asList(list));
+        return create(globalSubsetSize, globalSupersetSize, Arrays.asList(list), this);
     }

-    abstract InternalSignificantTerms newAggregation(long subsetSize, long supersetSize, List<Bucket> buckets);
+    protected abstract A create(long subsetSize, long supersetSize, List<InternalSignificantTerms.Bucket> buckets,
+            InternalSignificantTerms prototype);

 }
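The abstract newAggregation hook becomes a create method that is handed the merged buckets plus a prototype instance from which the name, heuristic, formatter, reducers and metadata are copied; doReduce now ends with create(globalSubsetSize, globalSupersetSize, Arrays.asList(list), this). A small sketch of that prototype-style factory with placeholder fields:

    import java.util.List;

    // Hypothetical illustration of the create(..., prototype) contract.
    abstract class SigTermsSketch {
        final String name;
        final long subsetSize;
        final long supersetSize;

        SigTermsSketch(String name, long subsetSize, long supersetSize) {
            this.name = name;
            this.subsetSize = subsetSize;
            this.supersetSize = supersetSize;
        }

        SigTermsSketch reduce(List<SigTermsSketch> shardResults) {
            long globalSubset = 0;
            long globalSuperset = 0;
            for (SigTermsSketch shard : shardResults) {
                globalSubset += shard.subsetSize;
                globalSuperset += shard.supersetSize;
            }
            // 'this' is the prototype carrying name/heuristic/reducers/metadata.
            return create(globalSubset, globalSuperset, this);
        }

        abstract SigTermsSketch create(long subsetSize, long supersetSize, SigTermsSketch prototype);
    }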
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java
index fa3b7587533..a450f9d0933 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTerms.java
@@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.bucket.BucketStreamContext;
 import org.elasticsearch.search.aggregations.bucket.BucketStreams;
 import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic;
 import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicStreams;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.format.ValueFormatter;
 import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams;
@@ -41,7 +42,7 @@ import java.util.Map;
 /**
  *
  */
-public class SignificantLongTerms extends InternalSignificantTerms {
+public class SignificantLongTerms extends InternalSignificantTerms<SignificantLongTerms, SignificantLongTerms.Bucket> {

     public static final Type TYPE = new Type("significant_terms", "siglterms");
@@ -161,16 +162,16 @@ public class SignificantLongTerms extends InternalSignificantTerms {
             return builder;
         }
     }
-
     private ValueFormatter formatter;

     SignificantLongTerms() {
     } // for serialization

-    public SignificantLongTerms(long subsetSize, long supersetSize, String name, @Nullable ValueFormatter formatter,
-            int requiredSize, long minDocCount, SignificanceHeuristic significanceHeuristic, List<? extends Bucket> buckets, Map<String, Object> metaData) {
+    public SignificantLongTerms(long subsetSize, long supersetSize, String name, @Nullable ValueFormatter formatter, int requiredSize,
+            long minDocCount, SignificanceHeuristic significanceHeuristic, List<? extends InternalSignificantTerms.Bucket> buckets,
+            List<Reducer> reducers, Map<String, Object> metaData) {

-        super(subsetSize, supersetSize, name, requiredSize, minDocCount, significanceHeuristic, buckets, metaData);
+        super(subsetSize, supersetSize, name, requiredSize, minDocCount, significanceHeuristic, buckets, reducers, metaData);
         this.formatter = formatter;
     }
@@ -180,9 +181,24 @@ public class SignificantLongTerms extends InternalSignificantTerms {
     }

     @Override
-    InternalSignificantTerms newAggregation(long subsetSize, long supersetSize,
-            List<Bucket> buckets) {
-        return new SignificantLongTerms(subsetSize, supersetSize, getName(), formatter, requiredSize, minDocCount, significanceHeuristic, buckets, getMetaData());
+    public SignificantLongTerms create(List<SignificantLongTerms.Bucket> buckets) {
+        return new SignificantLongTerms(this.subsetSize, this.supersetSize, this.name, this.formatter, this.requiredSize, this.minDocCount,
+                this.significanceHeuristic, buckets, this.reducers(), this.metaData);
+    }
+
+    @Override
+    public Bucket createBucket(InternalAggregations aggregations, SignificantLongTerms.Bucket prototype) {
+        return new Bucket(prototype.subsetDf, prototype.subsetSize, prototype.supersetDf, prototype.supersetSize, prototype.term,
+                aggregations, prototype.formatter);
+    }
+
+    @Override
+    protected SignificantLongTerms create(long subsetSize, long supersetSize,
+            List<InternalSignificantTerms.Bucket> buckets,
+            InternalSignificantTerms prototype) {
+        return new SignificantLongTerms(subsetSize, supersetSize, prototype.getName(), ((SignificantLongTerms) prototype).formatter,
+                prototype.requiredSize, prototype.minDocCount, prototype.significanceHeuristic, buckets, prototype.reducers(),
+                prototype.getMetaData());
     }

     @Override
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java
index 0b8d5813721..329f5f566f5 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantLongTermsAggregator.java
@@ -28,6 +28,7 @@ import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
 import org.elasticsearch.search.aggregations.LeafBucketCollector;
 import org.elasticsearch.search.aggregations.bucket.terms.LongTermsAggregator;
 import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.format.ValueFormat;
@@ -36,6 +37,7 @@ import org.elasticsearch.search.internal.ContextIndexSearcher;

 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;

 /**
@@ -44,10 +46,12 @@ import java.util.Map;
 public class SignificantLongTermsAggregator extends LongTermsAggregator {

     public SignificantLongTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @Nullable ValueFormat format,
-            BucketCountThresholds bucketCountThresholds,
-            AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggFactory, IncludeExclude.LongFilter includeExclude, Map<String, Object> metaData) throws IOException {
+            BucketCountThresholds bucketCountThresholds, AggregationContext aggregationContext,
+            Aggregator parent, SignificantTermsAggregatorFactory termsAggFactory, IncludeExclude.LongFilter includeExclude,
+            List<Reducer> reducers, Map<String, Object> metaData) throws IOException {

-        super(name, factories, valuesSource, format, null, bucketCountThresholds, aggregationContext, parent, SubAggCollectionMode.DEPTH_FIRST, false, includeExclude, metaData);
+        super(name, factories, valuesSource, format, null, bucketCountThresholds, aggregationContext, parent,
+                SubAggCollectionMode.DEPTH_FIRST, false, includeExclude, reducers, metaData);
         this.termsAggFactory = termsAggFactory;
     }
@@ -102,7 +106,9 @@ public class SignificantLongTermsAggregator extends LongTermsAggregator {
             bucket.aggregations = bucketAggregations(bucket.bucketOrd);
             list[i] = bucket;
         }
-        return new SignificantLongTerms(subsetSize, supersetSize, name, formatter, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Arrays.asList(list), metaData());
+        return new SignificantLongTerms(subsetSize, supersetSize, name, formatter, bucketCountThresholds.getRequiredSize(),
+                bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Arrays.asList(list), reducers(),
+                metaData());
     }

     @Override
@@ -111,7 +117,9 @@ public class SignificantLongTermsAggregator extends LongTermsAggregator {
         ContextIndexSearcher searcher = context.searchContext().searcher();
         IndexReader topReader = searcher.getIndexReader();
         int supersetSize = topReader.numDocs();
-        return new SignificantLongTerms(0, supersetSize, name, formatter, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Collections.emptyList(), metaData());
+        return new SignificantLongTerms(0, supersetSize, name, formatter, bucketCountThresholds.getRequiredSize(),
+                bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(),
+                Collections.<InternalSignificantTerms.Bucket> emptyList(), reducers(), metaData());
     }

     @Override
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java
index 374f3a09ce5..9fbaa6cc375 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTerms.java
@@ -30,6 +30,7 @@ import org.elasticsearch.search.aggregations.bucket.BucketStreamContext;
 import org.elasticsearch.search.aggregations.bucket.BucketStreams;
 import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic;
 import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicStreams;
+import org.elasticsearch.search.aggregations.reducers.Reducer;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -40,7 +41,7 @@ import java.util.Map;
 /**
  *
  */
-public class SignificantStringTerms extends InternalSignificantTerms {
+public class SignificantStringTerms extends InternalSignificantTerms<SignificantStringTerms, SignificantStringTerms.Bucket> {

     public static final InternalAggregation.Type TYPE = new Type("significant_terms", "sigsterms");
@@ -159,9 +160,10 @@ public class SignificantStringTerms extends InternalSignificantTerms {

     SignificantStringTerms() {} // for serialization

-    public SignificantStringTerms(long subsetSize, long supersetSize, String name, int requiredSize,
-            long minDocCount, SignificanceHeuristic significanceHeuristic, List<? extends Bucket> buckets, Map<String, Object> metaData) {
-        super(subsetSize, supersetSize, name, requiredSize, minDocCount, significanceHeuristic, buckets, metaData);
+    public SignificantStringTerms(long subsetSize, long supersetSize, String name, int requiredSize, long minDocCount,
+            SignificanceHeuristic significanceHeuristic, List<? extends InternalSignificantTerms.Bucket> buckets, List<Reducer> reducers,
+            Map<String, Object> metaData) {
+        super(subsetSize, supersetSize, name, requiredSize, minDocCount, significanceHeuristic, buckets, reducers, metaData);
     }

     @Override
@@ -170,9 +172,22 @@ public class SignificantStringTerms extends InternalSignificantTerms {
     }

     @Override
-    InternalSignificantTerms newAggregation(long subsetSize, long supersetSize,
-            List<Bucket> buckets) {
-        return new SignificantStringTerms(subsetSize, supersetSize, getName(), requiredSize, minDocCount, significanceHeuristic, buckets, getMetaData());
+    public SignificantStringTerms create(List<SignificantStringTerms.Bucket> buckets) {
+        return new SignificantStringTerms(this.subsetSize, this.supersetSize, this.name, this.requiredSize, this.minDocCount,
+                this.significanceHeuristic, buckets, this.reducers(), this.metaData);
+    }
+
+    @Override
+    public Bucket createBucket(InternalAggregations aggregations, SignificantStringTerms.Bucket prototype) {
+        return new Bucket(prototype.termBytes, prototype.subsetDf, prototype.subsetSize, prototype.supersetDf, prototype.supersetSize,
+                aggregations);
+    }
+
+    @Override
+    protected SignificantStringTerms create(long subsetSize, long supersetSize, List<InternalSignificantTerms.Bucket> buckets,
+            InternalSignificantTerms prototype) {
+        return new SignificantStringTerms(subsetSize, supersetSize, prototype.getName(), prototype.requiredSize, prototype.minDocCount,
+                prototype.significanceHeuristic, buckets, prototype.reducers(), prototype.getMetaData());
     }

     @Override
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java
index 532a71efae7..a49f18734ee 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantStringTermsAggregator.java
@@ -28,6 +28,7 @@ import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
 import org.elasticsearch.search.aggregations.LeafBucketCollector;
 import org.elasticsearch.search.aggregations.bucket.terms.StringTermsAggregator;
 import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.internal.ContextIndexSearcher;
@@ -35,6 +36,7 @@ import org.elasticsearch.search.internal.ContextIndexSearcher;

 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;

 /**
@@ -48,9 +50,11 @@ public class SignificantStringTermsAggregator extends StringTermsAggregator {

     public SignificantStringTermsAggregator(String name, AggregatorFactories factories, ValuesSource valuesSource,
             BucketCountThresholds bucketCountThresholds,
             IncludeExclude.StringFilter includeExclude, AggregationContext aggregationContext, Aggregator parent,
-            SignificantTermsAggregatorFactory termsAggFactory, Map<String, Object> metaData) throws IOException {
+            SignificantTermsAggregatorFactory termsAggFactory, List<Reducer> reducers, Map<String, Object> metaData)
+                    throws IOException {

-        super(name, factories, valuesSource, null, bucketCountThresholds, includeExclude, aggregationContext, parent, SubAggCollectionMode.DEPTH_FIRST, false, metaData);
+        super(name, factories, valuesSource, null, bucketCountThresholds, includeExclude, aggregationContext, parent,
+                SubAggCollectionMode.DEPTH_FIRST, false, reducers, metaData);
         this.termsAggFactory = termsAggFactory;
     }
@@ -107,7 +111,9 @@ public class SignificantStringTermsAggregator extends StringTermsAggregator {
             list[i] = bucket;
         }

-        return new SignificantStringTerms(subsetSize, supersetSize, name, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Arrays.asList(list), metaData());
+        return new SignificantStringTerms(subsetSize, supersetSize, name, bucketCountThresholds.getRequiredSize(),
+                bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Arrays.asList(list), reducers(),
+                metaData());
     }

     @Override
@@ -116,7 +122,9 @@ public class SignificantStringTermsAggregator extends StringTermsAggregator {
         ContextIndexSearcher searcher = context.searchContext().searcher();
         IndexReader topReader = searcher.getIndexReader();
         int supersetSize = topReader.numDocs();
-        return new SignificantStringTerms(0, supersetSize, name, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(), Collections.emptyList(), metaData());
+        return new SignificantStringTerms(0, supersetSize, name, bucketCountThresholds.getRequiredSize(),
+                bucketCountThresholds.getMinDocCount(), termsAggFactory.getSignificanceHeuristic(),
+                Collections.<InternalSignificantTerms.Bucket> emptyList(), reducers(), metaData());
     }

     @Override
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java
index ef837cfab82..ea4085f7cf2 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsAggregatorFactory.java
@@ -24,7 +24,6 @@ import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.lucene.index.FilterableTermsEnum;
@@ -32,13 +31,13 @@ import org.elasticsearch.common.lucene.index.FreqTermsEnum;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.search.aggregations.AggregationExecutionException;
 import org.elasticsearch.search.aggregations.Aggregator;
-import org.elasticsearch.search.aggregations.AggregatorBase;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.NonCollectingAggregator;
 import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregator;
 import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
@@ -46,6 +45,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
 import org.elasticsearch.search.internal.SearchContext;

 import java.io.IOException;
+import java.util.List;
 import java.util.Map;

 /**
@@ -63,10 +63,12 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFactory {

         @Override
         Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource,
-                TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
-                AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory, Map<String, Object> metaData) throws IOException {
+                TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
+                AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory,
+                List<Reducer> reducers, Map<String, Object> metaData) throws IOException {

             final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter();
-            return new SignificantStringTermsAggregator(name, factories, valuesSource, bucketCountThresholds, filter, aggregationContext, parent, termsAggregatorFactory, metaData);
+            return new SignificantStringTermsAggregator(name, factories, valuesSource, bucketCountThresholds, filter,
+                    aggregationContext, parent, termsAggregatorFactory, reducers, metaData);
         }

     },
@@ -75,11 +77,12 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFactory {

         @Override
         Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource,
                 TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
-                AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory, Map<String, Object> metaData) throws IOException {
+                AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory,
+                List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
             ValuesSource.Bytes.WithOrdinals valueSourceWithOrdinals = (ValuesSource.Bytes.WithOrdinals) valuesSource;
             IndexSearcher indexSearcher = aggregationContext.searchContext().searcher();
             final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
-            return new GlobalOrdinalsSignificantTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, filter, aggregationContext, parent, termsAggregatorFactory, metaData);
+            return new GlobalOrdinalsSignificantTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, filter, aggregationContext, parent, termsAggregatorFactory, reducers, metaData);
         }

     },
@@ -88,9 +91,12 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFactory {

         @Override
         Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource,
                 TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
-                AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory, Map<String, Object> metaData) throws IOException {
+                AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory,
+                List<Reducer> reducers, Map<String, Object> metaData) throws IOException {

             final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
-            return new GlobalOrdinalsSignificantTermsAggregator.WithHash(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, filter, aggregationContext, parent, termsAggregatorFactory, metaData);
+            return new GlobalOrdinalsSignificantTermsAggregator.WithHash(name, factories,
+                    (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, bucketCountThresholds, filter,
+                    aggregationContext, parent, termsAggregatorFactory, reducers, metaData);
         }

     };
@@ -100,7 +106,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFactory {
                 return mode;
             }
         }
-        throw new ElasticsearchIllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of " + values());
+        throw new IllegalArgumentException("Unknown `execution_hint`: [" + value + "], expected any of " + values());
     }

     private final ParseField parseField;
@@ -111,7 +117,8 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFactory {

         abstract Aggregator create(String name, AggregatorFactories factories, ValuesSource valuesSource,
                 TermsAggregator.BucketCountThresholds bucketCountThresholds, IncludeExclude includeExclude,
-                AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory, Map<String, Object> metaData) throws IOException;
+                AggregationContext aggregationContext, Aggregator parent, SignificantTermsAggregatorFactory termsAggregatorFactory,
+                List<Reducer> reducers, Map<String, Object> metaData) throws IOException;

         @Override
         public String toString() {
@@ -148,9 +155,11 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFactory {
     }

     @Override
-    protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData) throws IOException {
-        final InternalAggregation aggregation = new UnmappedSignificantTerms(name, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getMinDocCount(), metaData);
-        return new NonCollectingAggregator(name, aggregationContext, parent, metaData) {
+    protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List<Reducer> reducers,
+            Map<String, Object> metaData) throws IOException {
+        final InternalAggregation aggregation = new UnmappedSignificantTerms(name, bucketCountThresholds.getRequiredSize(),
+                bucketCountThresholds.getMinDocCount(), reducers, metaData);
+        return new NonCollectingAggregator(name, aggregationContext, parent, reducers, metaData) {
             @Override
             public InternalAggregation buildEmptyAggregation() {
                 return aggregation;
@@ -159,7 +168,8 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFactory {
     }

     @Override
-    protected Aggregator doCreateInternal(ValuesSource valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException {
+    protected Aggregator doCreateInternal(ValuesSource valuesSource, AggregationContext aggregationContext, Aggregator parent,
+            boolean collectsFromSingleBucket, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
         if (collectsFromSingleBucket == false) {
             return asMultiBucketAggregator(this, aggregationContext, parent);
         }
@@ -182,7 +192,8 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFactory {
             }
         }
         assert execution != null;
-        return execution.create(name, factories, valuesSource, bucketCountThresholds, includeExclude, aggregationContext, parent, this, metaData);
+        return execution.create(name, factories, valuesSource, bucketCountThresholds, includeExclude, aggregationContext, parent, this,
+                reducers, metaData);
     }
@@ -200,7 +211,8 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFactory {
             if (includeExclude != null) {
                 longFilter = includeExclude.convertToLongFilter();
             }
-            return new SignificantLongTermsAggregator(name, factories, (ValuesSource.Numeric) valuesSource, config.format(), bucketCountThresholds, aggregationContext, parent, this, longFilter, metaData);
+            return new SignificantLongTermsAggregator(name, factories, (ValuesSource.Numeric) valuesSource, config.format(),
+                    bucketCountThresholds, aggregationContext, parent, this, longFilter, reducers, metaData);
         }

         throw new AggregationExecutionException("sigfnificant_terms aggregation cannot be applied to field [" + config.fieldContext().field() +
@@ -254,7 +266,7 @@ public class SignificantTermsAggregatorFactory extends ValuesSourceAggregatorFactory {
         }

         @Override
-        public void close() throws ElasticsearchException {
+        public void close() {
             try {
                 if (termsEnum instanceof Releasable) {
                     ((Releasable) termsEnum).close();
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsBuilder.java
index 48d70c0f9c2..961ca04872b 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsBuilder.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/SignificantTermsBuilder.java
@@ -19,7 +19,6 @@

 package org.elasticsearch.search.aggregations.bucket.significant;

-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.query.FilterBuilder;
 import org.elasticsearch.search.aggregations.AggregationBuilder;
@@ -133,7 +132,7 @@ public class SignificantTermsBuilder extends AggregationBuilder {
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/UnmappedSignificantTerms.java

     public static final Type TYPE = new Type("significant_terms", "umsigterms");
@@ -55,10 +57,10 @@ public class UnmappedSignificantTerms extends InternalSignificantTerms {

     UnmappedSignificantTerms() {} // for serialization

-    public UnmappedSignificantTerms(String name, int requiredSize, long minDocCount, Map<String, Object> metaData) {
+    public UnmappedSignificantTerms(String name, int requiredSize, long minDocCount, List<Reducer> reducers, Map<String, Object> metaData) {
         //We pass zero for index/subset sizes because for the purpose of significant term analysis
         // we assume an unmapped index's size is irrelevant to the proceedings.
-        super(0, 0, name, requiredSize, minDocCount, JLHScore.INSTANCE, BUCKETS, metaData);
+        super(0, 0, name, requiredSize, minDocCount, JLHScore.INSTANCE, BUCKETS, reducers, metaData);
     }

     @Override
@@ -67,7 +69,22 @@ public class UnmappedSignificantTerms extends InternalSignificantTerms {
     }

     @Override
-    public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+    public UnmappedSignificantTerms create(List<InternalSignificantTerms.Bucket> buckets) {
+        return new UnmappedSignificantTerms(this.name, this.requiredSize, this.minDocCount, this.reducers(), this.metaData);
+    }
+
+    @Override
+    public InternalSignificantTerms.Bucket createBucket(InternalAggregations aggregations, InternalSignificantTerms.Bucket prototype) {
+        throw new UnsupportedOperationException("not supported for UnmappedSignificantTerms");
+    }
+
+    @Override
+    protected UnmappedSignificantTerms create(long subsetSize, long supersetSize, List<InternalSignificantTerms.Bucket> buckets, InternalSignificantTerms prototype) {
+        throw new UnsupportedOperationException("not supported for UnmappedSignificantTerms");
+    }
+
+    @Override
+    public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
         for (InternalAggregation aggregation : aggregations) {
             if (!(aggregation instanceof UnmappedSignificantTerms)) {
                 return aggregation.reduce(aggregations, reduceContext);
@@ -76,11 +93,6 @@ public class UnmappedSignificantTerms extends InternalSignificantTerms {
         return this;
     }

-    @Override
-    InternalSignificantTerms newAggregation(long subsetSize, long supersetSize, List<Bucket> buckets) {
-        throw new UnsupportedOperationException("How did you get there?");
-    }
-
     @Override
     protected void doReadFrom(StreamInput in) throws IOException {
         this.requiredSize = readSize(in);
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java
index b7fb56c4ac1..6c3753190b1 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/JLHScore.java
@@ -21,7 +21,6 @@

 package org.elasticsearch.search.aggregations.bucket.significant.heuristics;

-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java
index 8b158117384..a8f84c8070a 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/NXYSignificanceHeuristic.java
@@ -21,7 +21,6 @@

 package org.elasticsearch.search.aggregations.bucket.significant.heuristics;

-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -124,13 +123,13 @@ public abstract class NXYSignificanceHeuristic extends SignificanceHeuristic {
         checkFrequencyValidity(subsetFreq, subsetSize, supersetFreq, supersetSize, scoreFunctionName);
         if (backgroundIsSuperset) {
             if (subsetFreq > supersetFreq) {
-                throw new ElasticsearchIllegalArgumentException("subsetFreq > supersetFreq" + SCORE_ERROR_MESSAGE);
+                throw new IllegalArgumentException("subsetFreq > supersetFreq" + SCORE_ERROR_MESSAGE);
             }
             if (subsetSize > supersetSize) {
-                throw new ElasticsearchIllegalArgumentException("subsetSize > supersetSize" + SCORE_ERROR_MESSAGE);
+                throw new IllegalArgumentException("subsetSize > supersetSize" + SCORE_ERROR_MESSAGE);
             }
             if (supersetFreq - subsetFreq > supersetSize - subsetSize) {
-                throw new ElasticsearchIllegalArgumentException("supersetFreq - subsetFreq > supersetSize - subsetSize" + SCORE_ERROR_MESSAGE);
+                throw new IllegalArgumentException("supersetFreq - subsetFreq > supersetSize - subsetSize" + SCORE_ERROR_MESSAGE);
             }
         }
     }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java
index c273a3f3cc1..559d02fc4de 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/ScriptHeuristic.java
@@ -83,7 +83,7 @@ public class ScriptHeuristic extends SignificanceHeuristic {
     }

     public void initialize(InternalAggregation.ReduceContext context) {
-        script = context.scriptService().executable(scriptLang, scriptString, scriptType, ScriptContext.Standard.AGGS, params);
+        script = context.scriptService().executable(new Script(scriptLang, scriptString, scriptType, params), ScriptContext.Standard.AGGS);
         script.setNextVar("_subset_freq", subsetDfHolder);
         script.setNextVar("_subset_size", subsetSizeHolder);
         script.setNextVar("_superset_freq", supersetDfHolder);
@@ -168,7 +168,7 @@ public class ScriptHeuristic extends SignificanceHeuristic {
             }
             ExecutableScript searchScript;
             try {
-                searchScript = scriptService.executable(scriptLang, script, scriptType, ScriptContext.Standard.AGGS, params);
+                searchScript = scriptService.executable(new Script(scriptLang, script, scriptType, params), ScriptContext.Standard.AGGS);
             } catch (Exception e) {
                 throw new ElasticsearchParseException("The script [" + script + "] could not be loaded", e);
             }
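ScriptHeuristic stops passing four positional script arguments to ScriptService.executable and instead wraps them in a Script value object, which keeps call sites stable as scripting options grow. A sketch of the shape of that change, with hypothetical simplified types:

    import java.util.Map;

    // Hypothetical miniature of the Script parameter object.
    class ScriptSketch {
        final String lang;
        final String source;
        final String type; // ScriptType in the real code
        final Map<String, Object> params;

        ScriptSketch(String lang, String source, String type, Map<String, Object> params) {
            this.lang = lang;
            this.source = source;
            this.type = type;
            this.params = params;
        }
    }

    interface ScriptServiceSketch {
        // One parameter object replaces four positional arguments.
        Object executable(ScriptSketch script, String context);
    }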
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java
index b3da709323b..4f12277ca0f 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/significant/heuristics/SignificanceHeuristic.java
@@ -20,7 +20,6 @@

 package org.elasticsearch.search.aggregations.bucket.significant.heuristics;

-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.search.aggregations.InternalAggregation;

@@ -40,13 +39,13 @@ public abstract class SignificanceHeuristic {
     protected void checkFrequencyValidity(long subsetFreq, long subsetSize, long supersetFreq, long supersetSize, String scoreFunctionName) {
         if (subsetFreq < 0 || subsetSize < 0 || supersetFreq < 0 || supersetSize < 0) {
-            throw new ElasticsearchIllegalArgumentException("Frequencies of subset and superset must be positive in " + scoreFunctionName + ".getScore()");
+            throw new IllegalArgumentException("Frequencies of subset and superset must be positive in " + scoreFunctionName + ".getScore()");
         }
         if (subsetFreq > subsetSize) {
-            throw new ElasticsearchIllegalArgumentException("subsetFreq > subsetSize, in " + scoreFunctionName);
+            throw new IllegalArgumentException("subsetFreq > subsetSize, in " + scoreFunctionName);
         }
         if (supersetFreq > supersetSize) {
-            throw new ElasticsearchIllegalArgumentException("supersetFreq > supersetSize, in " + scoreFunctionName);
+            throw new IllegalArgumentException("supersetFreq > supersetSize, in " + scoreFunctionName);
         }
     }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java
index e87821e4e38..363895c5a39 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java
@@ -22,27 +22,30 @@ package org.elasticsearch.search.aggregations.bucket.terms;

 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;

 import java.io.IOException;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;

 abstract class AbstractStringTermsAggregator extends TermsAggregator {

     protected final boolean showTermDocCountError;

-    public AbstractStringTermsAggregator(String name, AggregatorFactories factories,
-            AggregationContext context, Aggregator parent,
-            Terms.Order order, BucketCountThresholds bucketCountThresholds,
-            SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, Map<String, Object> metaData) throws IOException {
-        super(name, factories, context, parent, bucketCountThresholds, order, subAggCollectMode, metaData);
+    public AbstractStringTermsAggregator(String name, AggregatorFactories factories, AggregationContext context, Aggregator parent,
+            Terms.Order order, BucketCountThresholds bucketCountThresholds, SubAggCollectionMode subAggCollectMode,
+            boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+        super(name, factories, context, parent, bucketCountThresholds, order, subAggCollectMode, reducers, metaData);
         this.showTermDocCountError = showTermDocCountError;
     }

     @Override
     public InternalAggregation buildEmptyAggregation() {
-        return new StringTerms(name, order, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), bucketCountThresholds.getMinDocCount(), Collections.emptyList(), showTermDocCountError, 0, 0, metaData());
+        return new StringTerms(name, order, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(),
+                bucketCountThresholds.getMinDocCount(), Collections.<InternalTerms.Bucket> emptyList(), showTermDocCountError, 0, 0,
+                reducers(), metaData());
     }

 }
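The internal aggregations also gain self-referential type parameters, e.g. DoubleTerms extends InternalTerms<DoubleTerms, DoubleTerms.Bucket> just below, so factory methods like create(...) can return the concrete subtype without casts. A minimal, self-contained sketch of that recursive-generics idiom:

    import java.util.Arrays;
    import java.util.List;

    // Minimal illustration of the "self type" generics used by this refactor.
    abstract class MultiBucketSketch<A extends MultiBucketSketch<A, B>, B> {
        // Subclasses get back their own concrete type; callers need no casting.
        public abstract A create(List<B> buckets);
    }

    final class DoubleTermsSketch extends MultiBucketSketch<DoubleTermsSketch, Double> {
        final List<Double> buckets;

        DoubleTermsSketch(List<Double> buckets) {
            this.buckets = buckets;
        }

        @Override
        public DoubleTermsSketch create(List<Double> buckets) {
            return new DoubleTermsSketch(buckets); // typed as DoubleTermsSketch, not the base class
        }

        public static void main(String[] args) {
            DoubleTermsSketch copy = new DoubleTermsSketch(Arrays.asList(1.0)).create(Arrays.asList(2.0, 3.0));
            System.out.println(copy.buckets);
        }
    }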
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java
index e87821e4e38..363895c5a39 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/AbstractStringTermsAggregator.java
@@ -22,27 +22,30 @@ package org.elasticsearch.search.aggregations.bucket.terms;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 
 import java.io.IOException;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 
 abstract class AbstractStringTermsAggregator extends TermsAggregator {
 
     protected final boolean showTermDocCountError;
 
-    public AbstractStringTermsAggregator(String name, AggregatorFactories factories,
-            AggregationContext context, Aggregator parent,
-            Terms.Order order, BucketCountThresholds bucketCountThresholds,
-            SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, Map<String, Object> metaData) throws IOException {
-        super(name, factories, context, parent, bucketCountThresholds, order, subAggCollectMode, metaData);
+    public AbstractStringTermsAggregator(String name, AggregatorFactories factories, AggregationContext context, Aggregator parent,
+            Terms.Order order, BucketCountThresholds bucketCountThresholds, SubAggCollectionMode subAggCollectMode,
+            boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+        super(name, factories, context, parent, bucketCountThresholds, order, subAggCollectMode, reducers, metaData);
         this.showTermDocCountError = showTermDocCountError;
     }
 
     @Override
     public InternalAggregation buildEmptyAggregation() {
-        return new StringTerms(name, order, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), bucketCountThresholds.getMinDocCount(), Collections.emptyList(), showTermDocCountError, 0, 0, metaData());
+        return new StringTerms(name, order, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(),
+                bucketCountThresholds.getMinDocCount(), Collections.emptyList(), showTermDocCountError, 0, 0,
+                reducers(), metaData());
     }
 }
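From here on, nearly every constructor in this patch gains the same trailing pair of arguments: a List<Reducer> that is handed up to the superclass and later re-exposed through reducers() when result objects are built. A compact sketch of that threading pattern, with Reducer reduced to an empty marker interface and the class names invented for illustration:

```java
import java.util.List;
import java.util.Map;

// Stand-in for org.elasticsearch.search.aggregations.reducers.Reducer.
interface Reducer {}

abstract class AggregatorSketch {
    private final List<Reducer> reducers;
    private final Map<String, Object> metaData;

    AggregatorSketch(String name, List<Reducer> reducers, Map<String, Object> metaData) {
        this.reducers = reducers;
        this.metaData = metaData;
    }

    // What buildAggregation()/buildEmptyAggregation() call when constructing results.
    protected List<Reducer> reducers() {
        return reducers;
    }

    protected Map<String, Object> metaData() {
        return metaData;
    }
}

class TermsAggregatorSketch extends AggregatorSketch {
    TermsAggregatorSketch(String name, List<Reducer> reducers, Map<String, Object> metaData) {
        super(name, reducers, metaData); // every subclass forwards the new argument unchanged
    }
}
```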
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java
index c004f6e1e90..dbb8061db09 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTerms.java
@@ -27,6 +27,7 @@ import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.bucket.BucketStreamContext;
 import org.elasticsearch.search.aggregations.bucket.BucketStreams;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.format.ValueFormatter;
 import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams;
 
@@ -39,7 +40,7 @@ import java.util.Map;
 /**
  *
  */
-public class DoubleTerms extends InternalTerms {
+public class DoubleTerms extends InternalTerms<DoubleTerms, DoubleTerms.Bucket> {
 
     public static final Type TYPE = new Type("terms", "dterms");
 
@@ -84,7 +85,8 @@ public class DoubleTerms extends InternalTerms {
             super(formatter, showDocCountError);
         }
 
-        public Bucket(double term, long docCount, InternalAggregations aggregations, boolean showDocCountError, long docCountError, @Nullable ValueFormatter formatter) {
+        public Bucket(double term, long docCount, InternalAggregations aggregations, boolean showDocCountError, long docCountError,
+                @Nullable ValueFormatter formatter) {
             super(docCount, aggregations, showDocCountError, docCountError, formatter);
             this.term = term;
         }
@@ -152,12 +154,17 @@ public class DoubleTerms extends InternalTerms {
         }
     }
 
-    private @Nullable ValueFormatter formatter;
+    private @Nullable
+    ValueFormatter formatter;
 
-    DoubleTerms() {} // for serialization
+    DoubleTerms() {
+    } // for serialization
 
-    public DoubleTerms(String name, Terms.Order order, @Nullable ValueFormatter formatter, int requiredSize, int shardSize, long minDocCount, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map<String, Object> metaData) {
-        super(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, metaData);
+    public DoubleTerms(String name, Terms.Order order, @Nullable ValueFormatter formatter, int requiredSize, int shardSize,
+            long minDocCount, List buckets, boolean showTermDocCountError, long docCountError,
+            long otherDocCount, List<Reducer> reducers, Map<String, Object> metaData) {
+        super(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, reducers,
+                metaData);
         this.formatter = formatter;
     }
 
@@ -167,8 +174,23 @@ public class DoubleTerms extends InternalTerms {
     }
 
     @Override
-    protected InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map<String, Object> metaData) {
-        return new DoubleTerms(name, order, formatter, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, metaData);
+    public DoubleTerms create(List buckets) {
+        return new DoubleTerms(this.name, this.order, this.formatter, this.requiredSize, this.shardSize, this.minDocCount, buckets,
+                this.showTermDocCountError, this.docCountError, this.otherDocCount, this.reducers(), this.metaData);
+    }
+
+    @Override
+    public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
+        return new Bucket(prototype.term, prototype.docCount, aggregations, prototype.showDocCountError, prototype.docCountError,
+                prototype.formatter);
+    }
+
+    @Override
+    protected DoubleTerms create(String name, List buckets,
+            long docCountError, long otherDocCount, InternalTerms prototype) {
+        return new DoubleTerms(name, prototype.order, ((DoubleTerms) prototype).formatter, prototype.requiredSize, prototype.shardSize,
+                prototype.minDocCount, buckets, prototype.showTermDocCountError, docCountError, otherDocCount, prototype.reducers(),
+                prototype.getMetaData());
     }
 
     @Override
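DoubleTerms (and below it LongTerms, StringTerms, UnmappedTerms) also trades its bespoke newAggregation(...) override for two prototype-based factory methods: create(...) rebuilds the aggregation around a new bucket list, and createBucket(...) clones a bucket with fresh sub-aggregations. The shape of that protocol, with deliberately simplified stand-in types:

```java
import java.util.List;

class BucketSketch {
    final double term;
    final long docCount;

    BucketSketch(double term, long docCount) {
        this.term = term;
        this.docCount = docCount;
    }
}

// Self-recursive type parameter so create(...) can return the concrete subtype.
abstract class TermsSketch<A extends TermsSketch<A>> {
    final String name;
    final List<BucketSketch> buckets;

    TermsSketch(String name, List<BucketSketch> buckets) {
        this.name = name;
        this.buckets = buckets;
    }

    // reduce() no longer calls a per-class newAggregation(); it asks the concrete
    // class to re-create itself around the reduced bucket list, copying the rest
    // of its state from a prototype instance.
    abstract A create(String name, List<BucketSketch> buckets, A prototype);
}

class DoubleTermsSketch extends TermsSketch<DoubleTermsSketch> {
    DoubleTermsSketch(String name, List<BucketSketch> buckets) {
        super(name, buckets);
    }

    @Override
    DoubleTermsSketch create(String name, List<BucketSketch> buckets, DoubleTermsSketch prototype) {
        // everything but the buckets comes from the prototype
        return new DoubleTermsSketch(name, buckets);
    }
}
```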
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java
index e71be14dc5b..9250495524e 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/DoubleTermsAggregator.java
@@ -26,6 +26,7 @@ import org.elasticsearch.index.fielddata.FieldData;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric;
@@ -33,6 +34,7 @@ import org.elasticsearch.search.aggregations.support.format.ValueFormat;
 
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -41,8 +43,11 @@ import java.util.Map;
 public class DoubleTermsAggregator extends LongTermsAggregator {
 
     public DoubleTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @Nullable ValueFormat format,
-            Terms.Order order, BucketCountThresholds bucketCountThresholds, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, IncludeExclude.LongFilter longFilter, Map<String, Object> metaData) throws IOException {
-        super(name, factories, valuesSource, format, order, bucketCountThresholds, aggregationContext, parent, collectionMode, showTermDocCountError, longFilter, metaData);
+            Terms.Order order, BucketCountThresholds bucketCountThresholds,
+            AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError,
+            IncludeExclude.LongFilter longFilter, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+        super(name, factories, valuesSource, format, order, bucketCountThresholds, aggregationContext, parent, collectionMode,
+                showTermDocCountError, longFilter, reducers, metaData);
     }
 
     @Override
@@ -73,7 +78,9 @@ public class DoubleTermsAggregator extends LongTermsAggregator {
         for (int i = 0; i < buckets.length; ++i) {
             buckets[i] = convertToDouble(buckets[i]);
         }
-        return new DoubleTerms(terms.getName(), terms.order, terms.formatter, terms.requiredSize, terms.shardSize, terms.minDocCount, Arrays.asList(buckets), terms.showTermDocCountError, terms.docCountError, terms.otherDocCount, terms.getMetaData());
+        return new DoubleTerms(terms.getName(), terms.order, terms.formatter, terms.requiredSize, terms.shardSize, terms.minDocCount,
+                Arrays.asList(buckets), terms.showTermDocCountError, terms.docCountError, terms.otherDocCount, terms.reducers(),
+                terms.getMetaData());
     }
 }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java
index 767f2d50926..67766c79c19 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/GlobalOrdinalsStringTermsAggregator.java
@@ -37,18 +37,20 @@ import org.elasticsearch.index.fielddata.AbstractRandomAccessOrds;
 import org.elasticsearch.index.fielddata.ordinals.GlobalOrdinalMapping;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
-import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.LeafBucketCollector;
+import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
 import org.elasticsearch.search.aggregations.bucket.terms.InternalTerms.Bucket;
 import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue;
 import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -71,8 +73,9 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
 
     public GlobalOrdinalsStringTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource,
             Terms.Order order, BucketCountThresholds bucketCountThresholds,
-            IncludeExclude.OrdinalsFilter includeExclude, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, Map<String, Object> metaData) throws IOException {
-        super(name, factories, aggregationContext, parent, order, bucketCountThresholds, collectionMode, showTermDocCountError, metaData);
+            IncludeExclude.OrdinalsFilter includeExclude, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+        super(name, factories, aggregationContext, parent, order, bucketCountThresholds, collectionMode, showTermDocCountError, reducers,
+                metaData);
         this.valuesSource = valuesSource;
         this.includeExclude = includeExclude;
     }
@@ -196,7 +199,9 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
             bucket.docCountError = 0;
         }
 
-        return new StringTerms(name, order, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), bucketCountThresholds.getMinDocCount(), Arrays.asList(list), showTermDocCountError, 0, otherDocCount, metaData());
+        return new StringTerms(name, order, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(),
+                bucketCountThresholds.getMinDocCount(), Arrays.asList(list), showTermDocCountError, 0, otherDocCount, reducers(),
+                metaData());
     }
 
     /**
@@ -261,8 +266,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
         public WithHash(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource, Terms.Order order,
                 BucketCountThresholds bucketCountThresholds, IncludeExclude.OrdinalsFilter includeExclude, AggregationContext aggregationContext,
-                Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, Map<String, Object> metaData) throws IOException {
-            super(name, factories, valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, collectionMode, showTermDocCountError, metaData);
+                Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+            super(name, factories, valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, collectionMode, showTermDocCountError, reducers, metaData);
             bucketOrds = new LongHash(1, aggregationContext.bigArrays());
         }
 
@@ -330,8 +335,8 @@ public class GlobalOrdinalsStringTermsAggregator extends AbstractStringTermsAggr
         private RandomAccessOrds segmentOrds;
 
         public LowCardinality(String name, AggregatorFactories factories, ValuesSource.Bytes.WithOrdinals.FieldData valuesSource,
-                Terms.Order order, BucketCountThresholds bucketCountThresholds, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, Map<String, Object> metaData) throws IOException {
-            super(name, factories, valuesSource, order, bucketCountThresholds, null, aggregationContext, parent, collectionMode, showTermDocCountError, metaData);
+                Terms.Order order, BucketCountThresholds bucketCountThresholds, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+            super(name, factories, valuesSource, order, bucketCountThresholds, null, aggregationContext, parent, collectionMode, showTermDocCountError, reducers, metaData);
             assert factories == null || factories.count() == 0;
             this.segmentDocCounts = context.bigArrays().newIntArray(1, true);
         }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
index ff7cf1ab78d..c5fedd659f0 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java
@@ -22,7 +22,6 @@
 import com.google.common.collect.ArrayListMultimap;
 import com.google.common.collect.Maps;
 import com.google.common.collect.Multimap;
-import org.elasticsearch.ElasticsearchIllegalStateException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.xcontent.ToXContent;
@@ -31,6 +30,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
 import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.format.ValueFormatter;
 
 import java.util.ArrayList;
@@ -42,7 +42,8 @@ import java.util.Map;
 /**
  *
  */
-public abstract class InternalTerms extends InternalMultiBucketAggregation implements Terms, ToXContent, Streamable {
+public abstract class InternalTerms<A extends InternalTerms, B extends InternalTerms.Bucket> extends InternalMultiBucketAggregation<A, B>
+        implements Terms, ToXContent, Streamable {
 
     protected static final String DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME = "doc_count_error_upper_bound";
     protected static final String SUM_OF_OTHER_DOC_COUNTS = "sum_other_doc_count";
@@ -78,7 +79,7 @@ public abstract class InternalTerms extends InternalMultiBucketAggregation imple
         @Override
         public long getDocCountError() {
             if (!showDocCountError) {
-                throw new ElasticsearchIllegalStateException("show_terms_doc_count_error is false");
+                throw new IllegalStateException("show_terms_doc_count_error is false");
             }
             return docCountError;
         }
@@ -114,7 +115,7 @@ public abstract class InternalTerms extends InternalMultiBucketAggregation imple
     protected int requiredSize;
     protected int shardSize;
     protected long minDocCount;
-    protected List buckets;
+    protected List buckets;
     protected Map bucketMap;
     protected long docCountError;
     protected boolean showTermDocCountError;
@@ -122,8 +123,10 @@ public abstract class InternalTerms extends InternalMultiBucketAggregation imple
 
     protected InternalTerms() {} // for serialization
 
-    protected InternalTerms(String name, Terms.Order order, int requiredSize, int shardSize, long minDocCount, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map<String, Object> metaData) {
-        super(name, metaData);
+    protected InternalTerms(String name, Terms.Order order, int requiredSize, int shardSize, long minDocCount,
+            List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, List<Reducer> reducers,
+            Map<String, Object> metaData) {
+        super(name, reducers, metaData);
         this.order = order;
         this.requiredSize = requiredSize;
         this.shardSize = shardSize;
@@ -162,13 +165,13 @@ public abstract class InternalTerms extends InternalMultiBucketAggregation imple
     }
 
     @Override
-    public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+    public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
         Multimap buckets = ArrayListMultimap.create();
         long sumDocCountError = 0;
         long otherDocCount = 0;
         for (InternalAggregation aggregation : aggregations) {
-            InternalTerms terms = (InternalTerms) aggregation;
+            InternalTerms terms = (InternalTerms) aggregation;
             otherDocCount += terms.getSumOfOtherDocCounts();
             final long thisAggDocCountError;
             if (terms.buckets.size() < this.shardSize || this.order == InternalOrder.TERM_ASC || this.order == InternalOrder.TERM_DESC) {
@@ -221,9 +224,10 @@ public abstract class InternalTerms extends InternalMultiBucketAggregation imple
         } else {
             docCountError = aggregations.size() == 1 ? 0 : sumDocCountError;
         }
-        return newAggregation(name, Arrays.asList(list), showTermDocCountError, docCountError, otherDocCount, getMetaData());
+        return create(name, Arrays.asList(list), docCountError, otherDocCount, this);
     }
 
-    protected abstract InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map<String, Object> metaData);
+    protected abstract A create(String name, List buckets, long docCountError, long otherDocCount,
+            InternalTerms prototype);
 }
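Note the rename from reduce(...) to doReduce(...): it suggests that InternalAggregation now owns the public reduce() entry point and calls back into the subclass, presumably so the base class can apply the attached reducers in one place. A guess at that template-method shape — inferred from the rename, not taken from the actual base class:

```java
import java.util.List;

abstract class InternalAggregationSketch {
    // Assumed public entry point: runs the subclass merge, then (in the real
    // class, presumably) applies any registered reducers to the result.
    public final InternalAggregationSketch reduce(List<InternalAggregationSketch> aggregations) {
        InternalAggregationSketch reduced = doReduce(aggregations);
        // ...reducer pipeline would run over "reduced" here...
        return reduced;
    }

    // The hook each aggregation type overrides, as in the hunks above.
    protected abstract InternalAggregationSketch doReduce(List<InternalAggregationSketch> aggregations);
}
```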
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
index 1a7c2b4d0ee..eee9e6bfc4b 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTerms.java
@@ -26,6 +26,7 @@ import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.bucket.BucketStreamContext;
 import org.elasticsearch.search.aggregations.bucket.BucketStreams;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.format.ValueFormatter;
 import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams;
 
@@ -38,7 +39,7 @@ import java.util.Map;
 /**
  *
  */
-public class LongTerms extends InternalTerms {
+public class LongTerms extends InternalTerms<LongTerms, LongTerms.Bucket> {
 
     public static final Type TYPE = new Type("terms", "lterms");
 
@@ -155,8 +156,11 @@ public class LongTerms extends InternalTerms {
 
     LongTerms() {} // for serialization
 
-    public LongTerms(String name, Terms.Order order, @Nullable ValueFormatter formatter, int requiredSize, int shardSize, long minDocCount, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map<String, Object> metaData) {
-        super(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, metaData);
+    public LongTerms(String name, Terms.Order order, @Nullable ValueFormatter formatter, int requiredSize, int shardSize, long minDocCount,
+            List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount,
+            List<Reducer> reducers, Map<String, Object> metaData) {
+        super(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, reducers,
+                metaData);
         this.formatter = formatter;
     }
 
@@ -166,8 +170,23 @@ public class LongTerms extends InternalTerms {
     }
 
     @Override
-    protected InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map<String, Object> metaData) {
-        return new LongTerms(name, order, formatter, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, metaData);
+    public LongTerms create(List buckets) {
+        return new LongTerms(this.name, this.order, this.formatter, this.requiredSize, this.shardSize, this.minDocCount, buckets,
+                this.showTermDocCountError, this.docCountError, this.otherDocCount, this.reducers(), this.metaData);
+    }
+
+    @Override
+    public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
+        return new Bucket(prototype.term, prototype.docCount, aggregations, prototype.showDocCountError, prototype.docCountError,
+                prototype.formatter);
+    }
+
+    @Override
+    protected LongTerms create(String name, List buckets,
+            long docCountError, long otherDocCount, InternalTerms prototype) {
+        return new LongTerms(name, prototype.order, ((LongTerms) prototype).formatter, prototype.requiredSize, prototype.shardSize,
+                prototype.minDocCount, buckets, prototype.showTermDocCountError, docCountError, otherDocCount, prototype.reducers(),
+                prototype.getMetaData());
     }
 
     @Override
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java
index a570b06360f..ea32e388fe6 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/LongTermsAggregator.java
@@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.LeafBucketCollector;
 import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue;
 import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
 import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude.LongFilter;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.format.ValueFormat;
@@ -39,6 +40,7 @@ import org.elasticsearch.search.aggregations.support.format.ValueFormatter;
 
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -53,8 +55,10 @@ public class LongTermsAggregator extends TermsAggregator {
     private LongFilter longFilter;
 
     public LongTermsAggregator(String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @Nullable ValueFormat format,
-            Terms.Order order, BucketCountThresholds bucketCountThresholds, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, IncludeExclude.LongFilter longFilter, Map<String, Object> metaData) throws IOException {
-        super(name, factories, aggregationContext, parent, bucketCountThresholds, order, subAggCollectMode, metaData);
+            Terms.Order order, BucketCountThresholds bucketCountThresholds, AggregationContext aggregationContext, Aggregator parent,
+            SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, IncludeExclude.LongFilter longFilter,
+            List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+        super(name, factories, aggregationContext, parent, bucketCountThresholds, order, subAggCollectMode, reducers, metaData);
         this.valuesSource = valuesSource;
         this.showTermDocCountError = showTermDocCountError;
         this.formatter = format != null ? format.formatter() : null;
@@ -157,13 +161,16 @@ public class LongTermsAggregator extends TermsAggregator {
             list[i].docCountError = 0;
         }
 
-        return new LongTerms(name, order, formatter, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), bucketCountThresholds.getMinDocCount(), Arrays.asList(list), showTermDocCountError, 0, otherDocCount, metaData());
+        return new LongTerms(name, order, formatter, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(),
+                bucketCountThresholds.getMinDocCount(), Arrays.asList(list), showTermDocCountError, 0, otherDocCount, reducers(),
+                metaData());
     }
 
-    @Override
     public InternalAggregation buildEmptyAggregation() {
-        return new LongTerms(name, order, formatter, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), bucketCountThresholds.getMinDocCount(), Collections.emptyList(), showTermDocCountError, 0, 0, metaData());
+        return new LongTerms(name, order, formatter, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(),
+                bucketCountThresholds.getMinDocCount(), Collections.emptyList(), showTermDocCountError, 0, 0,
+                reducers(), metaData());
     }
 
     @Override
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java
index 7caec199df3..ee458acdf13 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTerms.java
@@ -27,6 +27,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.InternalAggregations;
 import org.elasticsearch.search.aggregations.bucket.BucketStreamContext;
 import org.elasticsearch.search.aggregations.bucket.BucketStreams;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -37,7 +38,7 @@ import java.util.Map;
 /**
  *
  */
-public class StringTerms extends InternalTerms {
+public class StringTerms extends InternalTerms<StringTerms, StringTerms.Bucket> {
 
     public static final InternalAggregation.Type TYPE = new Type("terms", "sterms");
 
@@ -73,7 +74,6 @@ public class StringTerms extends InternalTerms {
         BucketStreams.registerStream(BUCKET_STREAM, TYPE.stream());
     }
 
-
     public static class Bucket extends InternalTerms.Bucket {
 
         BytesRef termBytes;
@@ -148,10 +148,14 @@ public class StringTerms extends InternalTerms {
         }
     }
 
-    StringTerms() {} // for serialization
+    StringTerms() {
+    } // for serialization
 
-    public StringTerms(String name, Terms.Order order, int requiredSize, int shardSize, long minDocCount, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map<String, Object> metaData) {
-        super(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, metaData);
+    public StringTerms(String name, Terms.Order order, int requiredSize, int shardSize, long minDocCount,
+            List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount,
+            List<Reducer> reducers, Map<String, Object> metaData) {
+        super(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, reducers,
+                metaData);
     }
 
     @Override
@@ -160,8 +164,21 @@ public class StringTerms extends InternalTerms {
     }
 
     @Override
-    protected InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map<String, Object> metaData) {
-        return new StringTerms(name, order, requiredSize, shardSize, minDocCount, buckets, showTermDocCountError, docCountError, otherDocCount, metaData);
+    public StringTerms create(List buckets) {
+        return new StringTerms(this.name, this.order, this.requiredSize, this.shardSize, this.minDocCount, buckets,
+                this.showTermDocCountError, this.docCountError, this.otherDocCount, this.reducers(), this.metaData);
+    }
+
+    @Override
+    public Bucket createBucket(InternalAggregations aggregations, Bucket prototype) {
+        return new Bucket(prototype.termBytes, prototype.docCount, aggregations, prototype.showDocCountError, prototype.docCountError);
+    }
+
+    @Override
+    protected StringTerms create(String name, List buckets,
+            long docCountError, long otherDocCount, InternalTerms prototype) {
+        return new StringTerms(name, prototype.order, prototype.requiredSize, prototype.shardSize, prototype.minDocCount, buckets,
+                prototype.showTermDocCountError, docCountError, otherDocCount, prototype.reducers(), prototype.getMetaData());
     }
 
     @Override
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java
index d625e3b9954..6f80142da27 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/StringTermsAggregator.java
@@ -31,11 +31,13 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.LeafBucketCollector;
 import org.elasticsearch.search.aggregations.bucket.terms.support.BucketPriorityQueue;
 import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 
 import java.io.IOException;
 import java.util.Arrays;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -49,9 +51,12 @@ public class StringTermsAggregator extends AbstractStringTermsAggregator {
 
     public StringTermsAggregator(String name, AggregatorFactories factories, ValuesSource valuesSource,
             Terms.Order order, BucketCountThresholds bucketCountThresholds,
-            IncludeExclude.StringFilter includeExclude, AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, Map<String, Object> metaData) throws IOException {
+            IncludeExclude.StringFilter includeExclude, AggregationContext aggregationContext,
+            Aggregator parent, SubAggCollectionMode collectionMode, boolean showTermDocCountError, List<Reducer> reducers,
+            Map<String, Object> metaData) throws IOException {
 
-        super(name, factories, aggregationContext, parent, order, bucketCountThresholds, collectionMode, showTermDocCountError, metaData);
+        super(name, factories, aggregationContext, parent, order, bucketCountThresholds, collectionMode, showTermDocCountError, reducers,
+                metaData);
         this.valuesSource = valuesSource;
         this.includeExclude = includeExclude;
         bucketOrds = new BytesRefHash(1, aggregationContext.bigArrays());
@@ -158,7 +163,9 @@ public class StringTermsAggregator extends AbstractStringTermsAggregator {
             bucket.docCountError = 0;
         }
 
-        return new StringTerms(name, order, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), bucketCountThresholds.getMinDocCount(), Arrays.asList(list), showTermDocCountError, 0, otherDocCount, metaData());
+        return new StringTerms(name, order, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(),
+                bucketCountThresholds.getMinDocCount(), Arrays.asList(list), showTermDocCountError, 0, otherDocCount, reducers(),
+                metaData());
     }
 
     @Override
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java
index 4cfe549a452..165675d0ddf 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregator.java
@@ -28,11 +28,13 @@ import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.bucket.BucketsAggregator;
 import org.elasticsearch.search.aggregations.bucket.terms.InternalOrder.Aggregation;
 import org.elasticsearch.search.aggregations.bucket.terms.InternalOrder.CompoundOrder;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.AggregationPath;
 
 import java.io.IOException;
 import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
@@ -135,8 +137,8 @@ public abstract class TermsAggregator extends BucketsAggregator {
     protected final Set<Aggregator> aggsUsedForSorting = new HashSet<>();
     protected final SubAggCollectionMode collectMode;
 
-    public TermsAggregator(String name, AggregatorFactories factories, AggregationContext context, Aggregator parent, BucketCountThresholds bucketCountThresholds, Terms.Order order, SubAggCollectionMode collectMode, Map<String, Object> metaData) throws IOException {
-        super(name, factories, context, parent, metaData);
+    public TermsAggregator(String name, AggregatorFactories factories, AggregationContext context, Aggregator parent, BucketCountThresholds bucketCountThresholds, Terms.Order order, SubAggCollectionMode collectMode, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+        super(name, factories, context, parent, reducers, metaData);
         this.bucketCountThresholds = bucketCountThresholds;
         this.order = InternalOrder.validate(order, this);
         this.collectMode = collectMode;
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
index 3fa99d2b7fd..4056409517e 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorFactory.java
@@ -19,22 +19,22 @@
 package org.elasticsearch.search.aggregations.bucket.terms;
 
 import org.apache.lucene.search.IndexSearcher;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.search.aggregations.AggregationExecutionException;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode;
-import org.elasticsearch.search.aggregations.AggregatorBase;
 import org.elasticsearch.search.aggregations.AggregatorFactories;
 import org.elasticsearch.search.aggregations.InternalAggregation;
 import org.elasticsearch.search.aggregations.NonCollectingAggregator;
 import org.elasticsearch.search.aggregations.bucket.terms.support.IncludeExclude;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.AggregationContext;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
 import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
 import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
 
 import java.io.IOException;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -49,9 +49,11 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory
-                    metaData) throws IOException {
+                    AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode,
+                    boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
                 final IncludeExclude.StringFilter filter = includeExclude == null ? null : includeExclude.convertToStringFilter();
-                return new StringTermsAggregator(name, factories, valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, metaData);
+                return new StringTermsAggregator(name, factories, valuesSource, order, bucketCountThresholds, filter,
+                        aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData);
             }
 
             @Override
@@ -65,9 +67,9 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory
-                    metaData) throws IOException {
+                    AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
                 final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
-                return new GlobalOrdinalsStringTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, metaData);
+                return new GlobalOrdinalsStringTermsAggregator(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData);
             }
 
             @Override
@@ -81,9 +83,9 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory
-                    metaData) throws IOException {
+                    AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
                 final IncludeExclude.OrdinalsFilter filter = includeExclude == null ? null : includeExclude.convertToOrdinalsFilter();
-                return new GlobalOrdinalsStringTermsAggregator.WithHash(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, metaData);
+                return new GlobalOrdinalsStringTermsAggregator.WithHash(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, filter, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData);
             }
 
             @Override
@@ -96,11 +98,12 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory
-                    metaData) throws IOException {
+                    AggregationContext aggregationContext, Aggregator parent, SubAggCollectionMode subAggCollectMode,
+                    boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
                 if (includeExclude != null || factories.count() > 0) {
-                    return GLOBAL_ORDINALS.create(name, factories, valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, subAggCollectMode, showTermDocCountError, metaData);
+                    return GLOBAL_ORDINALS.create(name, factories, valuesSource, order, bucketCountThresholds, includeExclude, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData);
                 }
-                return new GlobalOrdinalsStringTermsAggregator.LowCardinality(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, aggregationContext, parent, subAggCollectMode, showTermDocCountError, metaData);
+                return new GlobalOrdinalsStringTermsAggregator.LowCardinality(name, factories, (ValuesSource.Bytes.WithOrdinals.FieldData) valuesSource, order, bucketCountThresholds, aggregationContext, parent, subAggCollectMode, showTermDocCountError, reducers, metaData);
             }
 
             @Override
@@ -115,7 +118,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory
-                    metaData) throws IOException;
+                SubAggCollectionMode subAggCollectMode, boolean showTermDocCountError, List<Reducer> reducers, Map<String, Object> metaData) throws IOException;
 
         abstract boolean needsGlobalOrdinals();
 
@@ -155,9 +158,11 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory
-            metaData) throws IOException {
-        final InternalAggregation aggregation = new UnmappedTerms(name, order, bucketCountThresholds.getRequiredSize(), bucketCountThresholds.getShardSize(), bucketCountThresholds.getMinDocCount(), metaData);
-        return new NonCollectingAggregator(name, aggregationContext, parent, factories, metaData) {
+    protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List<Reducer> reducers,
+            Map<String, Object> metaData) throws IOException {
+        final InternalAggregation aggregation = new UnmappedTerms(name, order, bucketCountThresholds.getRequiredSize(),
+                bucketCountThresholds.getShardSize(), bucketCountThresholds.getMinDocCount(), reducers, metaData);
+        return new NonCollectingAggregator(name, aggregationContext, parent, factories, reducers, metaData) {
             {
                 // even in the case of an unmapped aggregator, validate the order
                 InternalOrder.validate(order, this);
@@ -170,7 +175,8 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory
-            metaData) throws IOException {
+    protected Aggregator doCreateInternal(ValuesSource valuesSource, AggregationContext aggregationContext, Aggregator parent,
+            boolean collectsFromSingleBucket, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
         if (collectsFromSingleBucket == false) {
             return asMultiBucketAggregator(this, aggregationContext, parent);
         }
@@ -220,7 +226,7 @@ public class TermsAggregatorFactory extends ValuesSourceAggregatorFactory
      */
     public TermsBuilder include(String regex) {
         if (includeTerms != null) {
-            throw new ElasticsearchIllegalArgumentException("exclude clause must be an array of strings or a regex, not both");
+            throw new IllegalArgumentException("exclude clause must be an array of strings or a regex, not both");
        }
        this.includePattern = regex;
        return this;
@@ -104,7 +103,7 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder {
      */
     public TermsBuilder include(String [] terms) {
         if (includePattern != null) {
-            throw new ElasticsearchIllegalArgumentException("include clause must be an array of exact values or a regex, not both");
+            throw new IllegalArgumentException("include clause must be an array of exact values or a regex, not both");
         }
         this.includeTerms = terms;
         return this;
@@ -115,7 +114,7 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder {
      */
     public TermsBuilder include(long [] terms) {
         if (includePattern != null) {
-            throw new ElasticsearchIllegalArgumentException("include clause must be an array of exact values or a regex, not both");
+            throw new IllegalArgumentException("include clause must be an array of exact values or a regex, not both");
         }
         this.includeTerms = longsArrToStringArr(terms);
         return this;
@@ -135,7 +134,7 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder {
      */
     public TermsBuilder include(double [] terms) {
         if (includePattern != null) {
-            throw new ElasticsearchIllegalArgumentException("include clause must be an array of exact values or a regex, not both");
+            throw new IllegalArgumentException("include clause must be an array of exact values or a regex, not both");
         }
         this.includeTerms = doubleArrToStringArr(terms);
         return this;
@@ -157,7 +156,7 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder {
      */
     public TermsBuilder exclude(String regex) {
         if (excludeTerms != null) {
-            throw new ElasticsearchIllegalArgumentException("exclude clause must be an array of exact values or a regex, not both");
+            throw new IllegalArgumentException("exclude clause must be an array of exact values or a regex, not both");
         }
         this.excludePattern = regex;
         return this;
@@ -168,7 +167,7 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder {
      */
     public TermsBuilder exclude(String [] terms) {
         if (excludePattern != null) {
-            throw new ElasticsearchIllegalArgumentException("exclude clause must be an array of exact values or a regex, not both");
+            throw new IllegalArgumentException("exclude clause must be an array of exact values or a regex, not both");
         }
         this.excludeTerms = terms;
         return this;
@@ -180,7 +179,7 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder {
      */
     public TermsBuilder exclude(long [] terms) {
         if (excludePattern != null) {
-            throw new ElasticsearchIllegalArgumentException("exclude clause must be an array of exact values or a regex, not both");
+            throw new IllegalArgumentException("exclude clause must be an array of exact values or a regex, not both");
         }
         this.excludeTerms = longsArrToStringArr(terms);
         return this;
@@ -191,7 +190,7 @@ public class TermsBuilder extends ValuesSourceAggregationBuilder {
      */
     public TermsBuilder exclude(double [] terms) {
         if (excludePattern != null) {
-            throw new ElasticsearchIllegalArgumentException("exclude clause must be an array of exact values or a regex, not both");
+            throw new IllegalArgumentException("exclude clause must be an array of exact values or a regex, not both");
         }
         this.excludeTerms = doubleArrToStringArr(terms);
         return this;
or a regex, not both"); } this.excludeTerms = doubleArrToStringArr(terms); return this; diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParametersParser.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParametersParser.java index 6ae88f63c57..63166bca78c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParametersParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsParametersParser.java @@ -56,7 +56,8 @@ public class TermsParametersParser extends AbstractTermsParametersParser { if ("order".equals(currentFieldName)) { this.orderElements = Collections.singletonList(parseOrderParam(aggregationName, parser, context)); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.START_ARRAY) { if ("order".equals(currentFieldName)) { @@ -66,18 +67,21 @@ public class TermsParametersParser extends AbstractTermsParametersParser { OrderElement orderParam = parseOrderParam(aggregationName, parser, context); orderElements.add(orderParam); } else { - throw new SearchParseException(context, "Order elements must be of type object in [" + aggregationName + "]."); + throw new SearchParseException(context, "Order elements must be of type object in [" + aggregationName + "].", + parser.getTokenLocation()); } } } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_BOOLEAN) { if (SHOW_TERM_DOC_COUNT_ERROR.match(currentFieldName)) { showTermDocCountError = parser.booleanValue(); } } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + + "].", parser.getTokenLocation()); } } @@ -96,14 +100,17 @@ public class TermsParametersParser extends AbstractTermsParametersParser { } else if ("desc".equalsIgnoreCase(dir)) { orderAsc = false; } else { - throw new SearchParseException(context, "Unknown terms order direction [" + dir + "] in terms aggregation [" + aggregationName + "]"); + throw new SearchParseException(context, "Unknown terms order direction [" + dir + "] in terms aggregation [" + + aggregationName + "]", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " for [order] in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " for [order] in [" + aggregationName + "].", + parser.getTokenLocation()); } } if (orderKey == null) { - throw new SearchParseException(context, "Must specify at least one field for [order] in [" + aggregationName + "]."); + throw new SearchParseException(context, "Must specify at least one field for [order] in [" + aggregationName + "].", + parser.getTokenLocation()); } else { orderParam = new OrderElement(orderKey, orderAsc); } diff --git 
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java
index 2cbdd4eabdc..ab7588da1c3 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/UnmappedTerms.java
@@ -23,6 +23,8 @@ import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.search.aggregations.AggregationStreams;
 import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 
 import java.io.IOException;
 import java.util.Collections;
@@ -32,7 +34,7 @@ import java.util.Map;
 /**
  *
  */
-public class UnmappedTerms extends InternalTerms {
+public class UnmappedTerms extends InternalTerms<UnmappedTerms, InternalTerms.Bucket> {
 
     public static final Type TYPE = new Type("terms", "umterms");
 
@@ -54,8 +56,9 @@ public class UnmappedTerms extends InternalTerms {
 
     UnmappedTerms() {} // for serialization
 
-    public UnmappedTerms(String name, Terms.Order order, int requiredSize, int shardSize, long minDocCount, Map<String, Object> metaData) {
-        super(name, order, requiredSize, shardSize, minDocCount, BUCKETS, false, 0, 0, metaData);
+    public UnmappedTerms(String name, Terms.Order order, int requiredSize, int shardSize, long minDocCount, List<Reducer> reducers,
+            Map<String, Object> metaData) {
+        super(name, order, requiredSize, shardSize, minDocCount, BUCKETS, false, 0, 0, reducers, metaData);
     }
 
     @Override
@@ -63,6 +66,21 @@ public class UnmappedTerms extends InternalTerms {
         return TYPE;
     }
 
+    @Override
+    public UnmappedTerms create(List buckets) {
+        return new UnmappedTerms(this.name, this.order, this.requiredSize, this.shardSize, this.minDocCount, this.reducers(), this.metaData);
+    }
+
+    @Override
+    public InternalTerms.Bucket createBucket(InternalAggregations aggregations, InternalTerms.Bucket prototype) {
+        throw new UnsupportedOperationException("not supported for UnmappedTerms");
+    }
+
+    @Override
+    protected UnmappedTerms create(String name, List buckets, long docCountError, long otherDocCount, InternalTerms prototype) {
+        throw new UnsupportedOperationException("not supported for UnmappedTerms");
+    }
+
     @Override
     protected void doReadFrom(StreamInput in) throws IOException {
         this.docCountError = 0;
@@ -81,7 +99,7 @@ public class UnmappedTerms extends InternalTerms {
     }
 
     @Override
-    public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+    public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
         for (InternalAggregation agg : aggregations) {
             if (!(agg instanceof UnmappedTerms)) {
                 return agg.reduce(aggregations, reduceContext);
@@ -90,11 +108,6 @@ public class UnmappedTerms extends InternalTerms {
         return this;
     }
 
-    @Override
-    protected InternalTerms newAggregation(String name, List buckets, boolean showTermDocCountError, long docCountError, long otherDocCount, Map<String, Object> metaData) {
-        throw new UnsupportedOperationException("How did you get there?");
-    }
-
     @Override
     public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
         builder.field(InternalTerms.DOC_COUNT_ERROR_UPPER_BOUND_FIELD_NAME, docCountError);
diff --git a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java
index cb1fc48be00..c0488011a35 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/support/IncludeExclude.java
@@ -34,7 +34,6 @@ import org.apache.lucene.util.automaton.ByteRunAutomaton;
 import org.apache.lucene.util.automaton.CompiledAutomaton;
 import org.apache.lucene.util.automaton.Operations;
 import org.apache.lucene.util.automaton.RegExp;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.search.aggregations.support.ValuesSource;
@@ -293,7 +292,7 @@ public class IncludeExclude {
             RegExp excludePattern = exclude != null ? new RegExp(exclude) : null;
             if (includePattern != null || excludePattern != null) {
                 if (includeValues != null || excludeValues != null) {
-                    throw new ElasticsearchIllegalArgumentException("Can only use regular expression include/exclude or a set of values, not both");
+                    throw new IllegalArgumentException("Can only use regular expression include/exclude or a set of values, not both");
                 }
                 return new IncludeExclude(includePattern, excludePattern);
             } else if (includeValues != null || excludeValues != null) {
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMetricsAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMetricsAggregation.java
index e3a9476e56a..8facf4c1ae5 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMetricsAggregation.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalMetricsAggregation.java
@@ -20,14 +20,16 @@
 package org.elasticsearch.search.aggregations.metrics;
 
 import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 
+import java.util.List;
 import java.util.Map;
 
 public abstract class InternalMetricsAggregation extends InternalAggregation {
 
     protected InternalMetricsAggregation() {} // for serialization
 
-    protected InternalMetricsAggregation(String name, Map<String, Object> metaData) {
-        super(name, metaData);
+    protected InternalMetricsAggregation(String name, List<Reducer> reducers, Map<String, Object> metaData) {
+        super(name, reducers, metaData);
     }
 }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java
index 2f94c38d464..d9d0aeeb1a8 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalNumericMetricsAggregation.java
@@ -18,7 +18,7 @@
  */
 package org.elasticsearch.search.aggregations.metrics;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
 import org.elasticsearch.search.aggregations.support.format.ValueFormatter;
 
 import java.util.List;
@@ -35,8 +35,8 @@ public abstract class InternalNumericMetricsAggregation extends InternalMetricsA
 
         protected SingleValue() {}
 
-        protected SingleValue(String name, Map<String, Object> metaData) {
-            super(name, metaData);
+        protected SingleValue(String name, List<Reducer> reducers, Map<String, Object> metaData) {
+            super(name, reducers, metaData);
         }
 
         @Override
@@ -55,7 +55,7 @@ public abstract class InternalNumericMetricsAggregation extends InternalMetricsA
             } else if (path.size() == 1 && "value".equals(path.get(0))) {
                 return value();
             } else {
-                throw new ElasticsearchIllegalArgumentException("path not supported for [" + getName() + "]: " + path);
+                throw new IllegalArgumentException("path not supported for [" + getName() + "]: " + path);
             }
         }
 
@@ -65,8 +65,8 @@ public abstract class InternalNumericMetricsAggregation extends InternalMetricsA
 
         protected MultiValue() {}
 
-        protected MultiValue(String name, Map<String, Object> metaData) {
-            super(name, metaData);
+        protected MultiValue(String name, List<Reducer> reducers, Map<String, Object> metaData) {
+            super(name, reducers, metaData);
         }
 
         public abstract double value(String name);
@@ -86,15 +86,15 @@ public abstract class InternalNumericMetricsAggregation extends InternalMetricsA
             } else if (path.size() == 1) {
                 return value(path.get(0));
             } else {
-                throw new ElasticsearchIllegalArgumentException("path not supported for [" + getName() + "]: " + path);
+                throw new IllegalArgumentException("path not supported for [" + getName() + "]: " + path);
             }
         }
     }
 
     private InternalNumericMetricsAggregation() {} // for serialization
 
-    private InternalNumericMetricsAggregation(String name, Map<String, Object> metaData) {
-        super(name, metaData);
+    private InternalNumericMetricsAggregation(String name, List<Reducer> reducers, Map<String, Object> metaData) {
+        super(name, reducers, metaData);
     }
 }
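The getProperty-style bodies in the hunks above resolve a property path against a metric: an empty path returns the aggregation itself, ["value"] (or a named value, for multi-valued metrics) returns the number, and anything else is rejected with the new IllegalArgumentException. A standalone copy of the single-valued case, with the enclosing class invented for illustration:

```java
import java.util.List;

abstract class SingleValueMetricSketch {
    abstract String getName();

    abstract double value();

    Object getProperty(List<String> path) {
        if (path.isEmpty()) {
            return this; // the aggregation object itself
        } else if (path.size() == 1 && "value".equals(path.get(0))) {
            return value(); // e.g. "my_avg.value" in an order path
        } else {
            // same rejection shape as the diff above
            throw new IllegalArgumentException("path not supported for [" + getName() + "]: " + path);
        }
    }
}
```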
public abstract class NumericMetricsAggregator extends MetricsAggregator { - private NumericMetricsAggregator(String name, AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + private NumericMetricsAggregator(String name, AggregationContext context, Aggregator parent, List reducers, + Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); } public static abstract class SingleValue extends NumericMetricsAggregator { - protected SingleValue(String name, AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + protected SingleValue(String name, AggregationContext context, Aggregator parent, List reducers, + Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); } public abstract double metric(long owningBucketOrd); @@ -44,8 +48,9 @@ public abstract class NumericMetricsAggregator extends MetricsAggregator { public static abstract class MultiValue extends NumericMetricsAggregator { - protected MultiValue(String name, AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + protected MultiValue(String name, AggregationContext context, Aggregator parent, List reducers, + Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); } public abstract boolean hasMetric(String name); diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/NumericValuesSourceMetricsAggregatorParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/NumericValuesSourceMetricsAggregatorParser.java index ae9e6844e2f..6847a9a5b3d 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/NumericValuesSourceMetricsAggregatorParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/NumericValuesSourceMetricsAggregatorParser.java @@ -58,7 +58,8 @@ public abstract class NumericValuesSourceMetricsAggregatorParser metaData) throws IOException { - super(name,context, parent, metaData); + AggregationContext context, Aggregator parent, List reducers, Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; this.formatter = formatter; if (valuesSource != null) { @@ -103,12 +105,12 @@ public class AvgAggregator extends NumericMetricsAggregator.SingleValue { if (valuesSource == null || bucket >= sums.size()) { return buildEmptyAggregation(); } - return new InternalAvg(name, sums.get(bucket), counts.get(bucket), formatter, metaData()); + return new InternalAvg(name, sums.get(bucket), counts.get(bucket), formatter, reducers(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalAvg(name, 0.0, 0l, formatter, metaData()); + return new InternalAvg(name, 0.0, 0l, formatter, reducers(), metaData()); } public static class Factory extends ValuesSourceAggregatorFactory.LeafOnly { @@ -118,13 +120,15 @@ public class AvgAggregator extends NumericMetricsAggregator.SingleValue { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new AvgAggregator(name, null, config.formatter(), aggregationContext, parent, metaData); + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new 
AvgAggregator(name, null, config.formatter(), aggregationContext, parent, reducers, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new AvgAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, metaData); + protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { + return new AvgAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java index 15980b45710..3f4af6cdf5c 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/avg/InternalAvg.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; @@ -57,8 +58,9 @@ public class InternalAvg extends InternalNumericMetricsAggregation.SingleValue i InternalAvg() {} // for serialization - public InternalAvg(String name, double sum, long count, @Nullable ValueFormatter formatter, Map metaData) { - super(name, metaData); + public InternalAvg(String name, double sum, long count, @Nullable ValueFormatter formatter, List reducers, + Map metaData) { + super(name, reducers, metaData); this.sum = sum; this.count = count; this.valueFormatter = formatter; @@ -80,14 +82,14 @@ public class InternalAvg extends InternalNumericMetricsAggregation.SingleValue i } @Override - public InternalAvg reduce(List aggregations, ReduceContext reduceContext) { + public InternalAvg doReduce(List aggregations, ReduceContext reduceContext) { long count = 0; double sum = 0; for (InternalAggregation aggregation : aggregations) { count += ((InternalAvg) aggregation).count; sum += ((InternalAvg) aggregation).sum; } - return new InternalAvg(getName(), sum, count, valueFormatter, getMetaData()); + return new InternalAvg(getName(), sum, count, valueFormatter, reducers(), getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java index e4c2acce93c..e00782687c9 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregator.java @@ -42,11 +42,13 @@ import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; +import 
org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -66,8 +68,8 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue private ValueFormatter formatter; public CardinalityAggregator(String name, ValuesSource valuesSource, boolean rehash, int precision, @Nullable ValueFormatter formatter, - AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + AggregationContext context, Aggregator parent, List reducers, Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; this.rehash = rehash; this.precision = precision; @@ -156,12 +158,12 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue // this Aggregator (and its HLL++ counters) is released. HyperLogLogPlusPlus copy = new HyperLogLogPlusPlus(precision, BigArrays.NON_RECYCLING_INSTANCE, 1); copy.merge(0, counts, owningBucketOrdinal); - return new InternalCardinality(name, copy, formatter, metaData()); + return new InternalCardinality(name, copy, formatter, reducers(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalCardinality(name, null, formatter, metaData()); + return new InternalCardinality(name, null, formatter, reducers(), metaData()); } @Override @@ -188,7 +190,7 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue } @Override - public void close() throws ElasticsearchException { + public void close() { // no-op } } @@ -218,7 +220,7 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue } @Override - public void close() throws ElasticsearchException { + public void close() { // no-op } @@ -295,7 +297,7 @@ public class CardinalityAggregator extends NumericMetricsAggregator.SingleValue } @Override - public void close() throws ElasticsearchException { + public void close() { Releasables.close(visitedOrds); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorFactory.java index 2d063dd5bd9..d2341bb2647 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityAggregatorFactory.java @@ -22,12 +22,14 @@ package org.elasticsearch.search.aggregations.metrics.cardinality; import org.elasticsearch.search.aggregations.AggregationExecutionException; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.bucket.SingleBucketAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import java.io.IOException; +import java.util.List; import java.util.Map; final class CardinalityAggregatorFactory 
extends ValuesSourceAggregatorFactory { @@ -46,16 +48,19 @@ final class CardinalityAggregatorFactory extends ValuesSourceAggregatorFactory metaData) throws IOException { - return new CardinalityAggregator(name, null, true, precision(parent), config.formatter(), context, parent, metaData); + protected Aggregator createUnmapped(AggregationContext context, Aggregator parent, List reducers, Map metaData) + throws IOException { + return new CardinalityAggregator(name, null, true, precision(parent), config.formatter(), context, parent, reducers, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource valuesSource, AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + protected Aggregator doCreateInternal(ValuesSource valuesSource, AggregationContext context, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { if (!(valuesSource instanceof ValuesSource.Numeric) && !rehash) { throw new AggregationExecutionException("Turning off rehashing for cardinality aggregation [" + name + "] on non-numeric values in not allowed"); } - return new CardinalityAggregator(name, valuesSource, rehash, precision(parent), config.formatter(), context, parent, metaData); + return new CardinalityAggregator(name, valuesSource, rehash, precision(parent), config.formatter(), context, parent, reducers, + metaData); } /* diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityParser.java index 57582476dc6..ee516e64313 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/CardinalityParser.java @@ -62,10 +62,11 @@ public class CardinalityParser implements Aggregator.Parser { } else if (PRECISION_THRESHOLD.match(currentFieldName)) { precisionThreshold = parser.longValue(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + name + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + name + "]: [" + currentFieldName + + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + name + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + name + "].", parser.getTokenLocation()); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java index 38661326818..b21bd5224c3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/HyperLogLogPlusPlus.java @@ -406,7 +406,7 @@ public final class HyperLogLogPlusPlus implements Releasable { } @Override - public void close() throws ElasticsearchException { + public void close() { Releasables.close(runLens, hashSet.sizes); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java index 9998d3eae10..5517702142c 100644 --- 
a/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/cardinality/InternalCardinality.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; @@ -53,8 +54,9 @@ public final class InternalCardinality extends InternalNumericMetricsAggregation private HyperLogLogPlusPlus counts; - InternalCardinality(String name, HyperLogLogPlusPlus counts, @Nullable ValueFormatter formatter, Map metaData) { - super(name, metaData); + InternalCardinality(String name, HyperLogLogPlusPlus counts, @Nullable ValueFormatter formatter, List reducers, + Map metaData) { + super(name, reducers, metaData); this.counts = counts; this.valueFormatter = formatter; } @@ -99,14 +101,14 @@ public final class InternalCardinality extends InternalNumericMetricsAggregation } @Override - public InternalAggregation reduce(List aggregations, ReduceContext reduceContext) { + public InternalAggregation doReduce(List aggregations, ReduceContext reduceContext) { InternalCardinality reduced = null; for (InternalAggregation aggregation : aggregations) { final InternalCardinality cardinality = (InternalCardinality) aggregation; if (cardinality.counts != null) { if (reduced == null) { reduced = new InternalCardinality(name, new HyperLogLogPlusPlus(cardinality.counts.precision(), - BigArrays.NON_RECYCLING_INSTANCE, 1), this.valueFormatter, getMetaData()); + BigArrays.NON_RECYCLING_INSTANCE, 1), this.valueFormatter, reducers(), getMetaData()); } reduced.merge(cardinality); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java index 44e7fd195c0..464d0a339a8 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsAggregator.java @@ -30,12 +30,14 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.metrics.MetricsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import java.io.IOException; +import java.util.List; import java.util.Map; public final class GeoBoundsAggregator extends MetricsAggregator { @@ -49,9 +51,10 @@ public final class GeoBoundsAggregator extends MetricsAggregator { DoubleArray negLefts; DoubleArray negRights; - protected GeoBoundsAggregator(String name, AggregationContext aggregationContext, - Aggregator parent, ValuesSource.GeoPoint valuesSource, boolean 
wrapLongitude, Map metaData) throws IOException { - super(name, aggregationContext, parent, metaData); + protected GeoBoundsAggregator(String name, AggregationContext aggregationContext, Aggregator parent, + ValuesSource.GeoPoint valuesSource, boolean wrapLongitude, List reducers, + Map metaData) throws IOException { + super(name, aggregationContext, parent, reducers, metaData); this.valuesSource = valuesSource; this.wrapLongitude = wrapLongitude; if (valuesSource != null) { @@ -149,13 +152,13 @@ public final class GeoBoundsAggregator extends MetricsAggregator { double posRight = posRights.get(owningBucketOrdinal); double negLeft = negLefts.get(owningBucketOrdinal); double negRight = negRights.get(owningBucketOrdinal); - return new InternalGeoBounds(name, top, bottom, posLeft, posRight, negLeft, negRight, wrapLongitude, metaData()); + return new InternalGeoBounds(name, top, bottom, posLeft, posRight, negLeft, negRight, wrapLongitude, reducers(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { return new InternalGeoBounds(name, Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, Double.POSITIVE_INFINITY, - Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, wrapLongitude, metaData()); + Double.NEGATIVE_INFINITY, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, wrapLongitude, reducers(), metaData()); } @Override @@ -173,14 +176,15 @@ public final class GeoBoundsAggregator extends MetricsAggregator { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new GeoBoundsAggregator(name, aggregationContext, parent, null, wrapLongitude, metaData); + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new GeoBoundsAggregator(name, aggregationContext, parent, null, wrapLongitude, reducers, metaData); } @Override protected Aggregator doCreateInternal(ValuesSource.GeoPoint valuesSource, AggregationContext aggregationContext, - Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new GeoBoundsAggregator(name, aggregationContext, parent, valuesSource, wrapLongitude, metaData); + Aggregator parent, boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { + return new GeoBoundsAggregator(name, aggregationContext, parent, valuesSource, wrapLongitude, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsParser.java index d0f7da083c5..de1fea2f504 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/GeoBoundsParser.java @@ -56,10 +56,12 @@ public class GeoBoundsParser implements Aggregator.Parser { if ("wrap_longitude".equals(currentFieldName) || "wrapLongitude".equals(currentFieldName)) { wrapLongitude = parser.booleanValue(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, 
"Unknown key for a " + token + " in aggregation [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in aggregation [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } return new GeoBoundsAggregator.Factory(aggregationName, vsParser.config(), wrapLongitude); diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java index a59abe1ad2b..0da4e6bfcc7 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/geobounds/InternalGeoBounds.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.metrics.geobounds; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -27,6 +26,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import java.io.IOException; import java.util.List; @@ -56,8 +56,9 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo } InternalGeoBounds(String name, double top, double bottom, double posLeft, double posRight, - double negLeft, double negRight, boolean wrapLongitude, Map metaData) { - super(name, metaData); + double negLeft, double negRight, boolean wrapLongitude, + List reducers, Map metaData) { + super(name, reducers, metaData); this.top = top; this.bottom = bottom; this.posLeft = posLeft; @@ -73,7 +74,7 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo } @Override - public InternalAggregation reduce(List aggregations, ReduceContext reduceContext) { + public InternalAggregation doReduce(List aggregations, ReduceContext reduceContext) { double top = Double.NEGATIVE_INFINITY; double bottom = Double.POSITIVE_INFINITY; double posLeft = Double.POSITIVE_INFINITY; @@ -103,7 +104,7 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo negRight = bounds.negRight; } } - return new InternalGeoBounds(name, top, bottom, posLeft, posRight, negLeft, negRight, wrapLongitude, getMetaData()); + return new InternalGeoBounds(name, top, bottom, posLeft, posRight, negLeft, negRight, wrapLongitude, reducers(), getMetaData()); } @Override @@ -123,7 +124,7 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo case "right": return boundingBox.bottomRight.lon(); default: - throw new ElasticsearchIllegalArgumentException("Found unknown path element [" + bBoxSide + "] in [" + getName() + "]"); + throw new IllegalArgumentException("Found unknown path element [" + bBoxSide + "] in [" + getName() + "]"); } } else if (path.size() == 2) { BoundingBox boundingBox = resolveBoundingBox(); @@ -137,7 +138,7 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo cornerPoint = boundingBox.bottomRight; break; default: - throw new ElasticsearchIllegalArgumentException("Found unknown path element [" + cornerString + "] in [" + 
getName() + "]"); + throw new IllegalArgumentException("Found unknown path element [" + cornerString + "] in [" + getName() + "]"); } String latLonString = path.get(1); switch (latLonString) { @@ -146,10 +147,10 @@ public class InternalGeoBounds extends InternalMetricsAggregation implements Geo case "lon": return cornerPoint.lon(); default: - throw new ElasticsearchIllegalArgumentException("Found unknown path element [" + latLonString + "] in [" + getName() + "]"); + throw new IllegalArgumentException("Found unknown path element [" + latLonString + "] in [" + getName() + "]"); } } else { - throw new ElasticsearchIllegalArgumentException("path not supported for [" + getName() + "]: " + path); + throw new IllegalArgumentException("path not supported for [" + getName() + "]: " + path); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java index a3b938cf363..b2f103fc488 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/InternalMax.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; @@ -56,8 +57,8 @@ public class InternalMax extends InternalNumericMetricsAggregation.SingleValue i InternalMax() {} // for serialization - public InternalMax(String name, double max, @Nullable ValueFormatter formatter, Map metaData) { - super(name, metaData); + public InternalMax(String name, double max, @Nullable ValueFormatter formatter, List reducers, Map metaData) { + super(name, reducers, metaData); this.valueFormatter = formatter; this.max = max; } @@ -78,12 +79,12 @@ public class InternalMax extends InternalNumericMetricsAggregation.SingleValue i } @Override - public InternalMax reduce(List aggregations, ReduceContext reduceContext) { + public InternalMax doReduce(List aggregations, ReduceContext reduceContext) { double max = Double.NEGATIVE_INFINITY; for (InternalAggregation aggregation : aggregations) { max = Math.max(max, ((InternalMax) aggregation).max); } - return new InternalMax(name, max, valueFormatter, getMetaData()); + return new InternalMax(name, max, valueFormatter, reducers(), getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java index 88edddc286c..7ade492660e 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/max/MaxAggregator.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import 
org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -38,6 +39,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -51,8 +53,9 @@ public class MaxAggregator extends NumericMetricsAggregator.SingleValue { DoubleArray maxes; public MaxAggregator(String name, ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter, - AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + AggregationContext context, Aggregator parent, List reducers, + Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; this.formatter = formatter; if (valuesSource != null) { @@ -71,7 +74,7 @@ public class MaxAggregator extends NumericMetricsAggregator.SingleValue { final LeafBucketCollector sub) throws IOException { if (valuesSource == null) { return LeafBucketCollector.NO_OP_COLLECTOR; - } + } final BigArrays bigArrays = context.bigArrays(); final SortedNumericDoubleValues allValues = valuesSource.doubleValues(ctx); final NumericDoubleValues values = MultiValueMode.MAX.select(allValues, Double.NEGATIVE_INFINITY); @@ -103,12 +106,12 @@ public class MaxAggregator extends NumericMetricsAggregator.SingleValue { if (valuesSource == null || bucket >= maxes.size()) { return buildEmptyAggregation(); } - return new InternalMax(name, maxes.get(bucket), formatter, metaData()); + return new InternalMax(name, maxes.get(bucket), formatter, reducers(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalMax(name, Double.NEGATIVE_INFINITY, formatter, metaData()); + return new InternalMax(name, Double.NEGATIVE_INFINITY, formatter, reducers(), metaData()); } public static class Factory extends ValuesSourceAggregatorFactory.LeafOnly { @@ -118,13 +121,15 @@ public class MaxAggregator extends NumericMetricsAggregator.SingleValue { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new MaxAggregator(name, null, config.formatter(), aggregationContext, parent, metaData); + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new MaxAggregator(name, null, config.formatter(), aggregationContext, parent, reducers, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new MaxAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, metaData); + protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { + return new MaxAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java 
b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java index 7beb9c5a76a..d6269602169 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/InternalMin.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; @@ -57,8 +58,8 @@ public class InternalMin extends InternalNumericMetricsAggregation.SingleValue i InternalMin() {} // for serialization - public InternalMin(String name, double min, @Nullable ValueFormatter formatter, Map metaData) { - super(name, metaData); + public InternalMin(String name, double min, @Nullable ValueFormatter formatter, List reducers, Map metaData) { + super(name, reducers, metaData); this.min = min; this.valueFormatter = formatter; } @@ -79,12 +80,12 @@ public class InternalMin extends InternalNumericMetricsAggregation.SingleValue i } @Override - public InternalMin reduce(List aggregations, ReduceContext reduceContext) { + public InternalMin doReduce(List aggregations, ReduceContext reduceContext) { double min = Double.POSITIVE_INFINITY; for (InternalAggregation aggregation : aggregations) { min = Math.min(min, ((InternalMin) aggregation).min); } - return new InternalMin(getName(), min, this.valueFormatter, getMetaData()); + return new InternalMin(getName(), min, this.valueFormatter, reducers(), getMetaData()); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java index 438272e2bc1..cf832cabe1f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/min/MinAggregator.java @@ -31,6 +31,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory; @@ -38,6 +39,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -51,8 +53,9 @@ public class MinAggregator extends NumericMetricsAggregator.SingleValue { DoubleArray mins; public MinAggregator(String name, ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter, - AggregationContext context, Aggregator parent, Map metaData) throws IOException { - super(name, context, parent, metaData); + AggregationContext context, Aggregator parent, List reducers, + Map metaData) throws IOException { + 
super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; if (valuesSource != null) { mins = context.bigArrays().newDoubleArray(1, false); @@ -103,12 +106,12 @@ public class MinAggregator extends NumericMetricsAggregator.SingleValue { if (valuesSource == null || bucket >= mins.size()) { return buildEmptyAggregation(); } - return new InternalMin(name, mins.get(bucket), formatter, metaData()); + return new InternalMin(name, mins.get(bucket), formatter, reducers(), metaData()); } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalMin(name, Double.POSITIVE_INFINITY, formatter, metaData()); + return new InternalMin(name, Double.POSITIVE_INFINITY, formatter, reducers(), metaData()); } public static class Factory extends ValuesSourceAggregatorFactory.LeafOnly { @@ -118,13 +121,15 @@ public class MinAggregator extends NumericMetricsAggregator.SingleValue { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { - return new MinAggregator(name, null, config.formatter(), aggregationContext, parent, metaData); + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { + return new MinAggregator(name, null, config.formatter(), aggregationContext, parent, reducers, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { - return new MinAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, metaData); + protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { + return new MinAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, reducers, metaData); } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java index b4916cdd8c9..081e5e07fda 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractInternalPercentiles.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.metrics.percentiles; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.io.stream.StreamInput; @@ -28,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.TDigestState; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; @@ -44,8 +44,9 @@ abstract class AbstractInternalPercentiles extends InternalNumericMetricsAggrega AbstractInternalPercentiles() {} // for serialization 
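
`InternalMin.doReduce` above, like `InternalMax` and `InternalAvg` earlier in the patch, is the entire shard-reduce story for the simple metrics: fold one partial result per shard into an accumulator that starts at a neutral element, then rebuild the aggregation, now carrying `reducers()` forward. A standalone sketch of both folding shapes, with hypothetical names:

```java
import java.util.Arrays;
import java.util.List;

final class MetricReduceSketch {

    // Hypothetical per-shard partial state for an avg: sum and count travel separately
    // so the final division happens exactly once, after all shards are merged.
    static final class PartialAvg {
        final double sum;
        final long count;
        PartialAvg(double sum, long count) { this.sum = sum; this.count = count; }
    }

    static double reduceAvg(List<PartialAvg> shards) {   // mirrors InternalAvg.doReduce
        double sum = 0;
        long count = 0;
        for (PartialAvg p : shards) {
            sum += p.sum;
            count += p.count;
        }
        return sum / count;
    }

    static double reduceMin(List<Double> shardMins) {    // mirrors InternalMin.doReduce
        double min = Double.POSITIVE_INFINITY;           // neutral element: empty shards don't move it
        for (double m : shardMins) {
            min = Math.min(min, m);
        }
        return min;
    }

    public static void main(String[] args) {
        System.out.println(reduceAvg(Arrays.asList(new PartialAvg(10, 4), new PartialAvg(20, 6)))); // 3.0
        System.out.println(reduceMin(Arrays.asList(5.0, 2.0, 9.0)));                                // 2.0
    }
}
```
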
public AbstractInternalPercentiles(String name, double[] keys, TDigestState state, boolean keyed, @Nullable ValueFormatter formatter, + List reducers, Map metaData) { - super(name, metaData); + super(name, reducers, metaData); this.keys = keys; this.state = state; this.keyed = keyed; @@ -60,7 +61,7 @@ abstract class AbstractInternalPercentiles extends InternalNumericMetricsAggrega public abstract double value(double key); @Override - public AbstractInternalPercentiles reduce(List aggregations, ReduceContext reduceContext) { + public AbstractInternalPercentiles doReduce(List aggregations, ReduceContext reduceContext) { TDigestState merged = null; for (InternalAggregation aggregation : aggregations) { final AbstractInternalPercentiles percentiles = (AbstractInternalPercentiles) aggregation; @@ -69,10 +70,11 @@ abstract class AbstractInternalPercentiles extends InternalNumericMetricsAggrega } merged.add(percentiles.state); } - return createReduced(getName(), keys, merged, keyed, getMetaData()); + return createReduced(getName(), keys, merged, keyed, reducers(), getMetaData()); } - protected abstract AbstractInternalPercentiles createReduced(String name, double[] keys, TDigestState merged, boolean keyed, Map metaData); + protected abstract AbstractInternalPercentiles createReduced(String name, double[] keys, TDigestState merged, boolean keyed, + List reducers, Map metaData); @Override protected void doReadFrom(StreamInput in) throws IOException { @@ -80,7 +82,7 @@ abstract class AbstractInternalPercentiles extends InternalNumericMetricsAggrega if (in.getVersion().before(Version.V_1_2_0)) { final byte id = in.readByte(); if (id != 0) { - throw new ElasticsearchIllegalArgumentException("Unexpected percentiles aggregator id [" + id + "]"); + throw new IllegalArgumentException("Unexpected percentiles aggregator id [" + id + "]"); } } keys = new double[in.readInt()]; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesAggregator.java index 31a097f0b47..a73639a3d7f 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesAggregator.java @@ -31,11 +31,13 @@ import org.elasticsearch.search.aggregations.LeafBucketCollectorBase; import org.elasticsearch.search.aggregations.LeafBucketCollector; import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator; import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.TDigestState; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; public abstract class AbstractPercentilesAggregator extends NumericMetricsAggregator.MultiValue { @@ -53,8 +55,9 @@ public abstract class AbstractPercentilesAggregator extends NumericMetricsAggreg public AbstractPercentilesAggregator(String name, ValuesSource.Numeric valuesSource, AggregationContext context, Aggregator parent, double[] keys, double compression, boolean keyed, - @Nullable ValueFormatter formatter, Map metaData) throws IOException { - super(name, context, parent, metaData); + 
@Nullable ValueFormatter formatter, List reducers, + Map metaData) throws IOException { + super(name, context, parent, reducers, metaData); this.valuesSource = valuesSource; this.keyed = keyed; this.formatter = formatter; diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesParser.java index 7fee395966b..8d12c9bf1a7 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/AbstractPercentilesParser.java @@ -69,22 +69,26 @@ public abstract class AbstractPercentilesParser implements Aggregator.Parser { keys = values.toArray(); Arrays.sort(keys); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_BOOLEAN) { if ("keyed".equals(currentFieldName)) { keyed = parser.booleanValue(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else if (token == XContentParser.Token.VALUE_NUMBER) { if ("compression".equals(currentFieldName)) { compression = parser.doubleValue(); } else { - throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "]."); + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); } } else { - throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "]."); + throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].", + parser.getTokenLocation()); } } return buildFactory(context, aggregationName, vsParser.config(), keys, compression, keyed); diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentileRanks.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentileRanks.java index b096d479243..291db0451b0 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentileRanks.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentileRanks.java @@ -24,10 +24,12 @@ import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.TDigestState; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; import java.util.Iterator; +import java.util.List; import java.util.Map; /** @@ -53,8 +55,9 @@ public class InternalPercentileRanks extends AbstractInternalPercentiles impleme InternalPercentileRanks() {} // for serialization public 
InternalPercentileRanks(String name, double[] cdfValues, TDigestState state, boolean keyed, @Nullable ValueFormatter formatter, + List reducers, Map metaData) { - super(name, cdfValues, state, keyed, formatter, metaData); + super(name, cdfValues, state, keyed, formatter, reducers, metaData); } @Override @@ -78,8 +81,9 @@ public class InternalPercentileRanks extends AbstractInternalPercentiles impleme } @Override - protected AbstractInternalPercentiles createReduced(String name, double[] keys, TDigestState merged, boolean keyed, Map metaData) { - return new InternalPercentileRanks(name, keys, merged, keyed, valueFormatter, metaData); + protected AbstractInternalPercentiles createReduced(String name, double[] keys, TDigestState merged, boolean keyed, + List reducers, Map metaData) { + return new InternalPercentileRanks(name, keys, merged, keyed, valueFormatter, reducers, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentiles.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentiles.java index 94beb90a911..71360dad8fb 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentiles.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/InternalPercentiles.java @@ -24,10 +24,12 @@ import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.aggregations.AggregationStreams; import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.TDigestState; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; import java.util.Iterator; +import java.util.List; import java.util.Map; /** @@ -53,8 +55,9 @@ public class InternalPercentiles extends AbstractInternalPercentiles implements InternalPercentiles() {} // for serialization public InternalPercentiles(String name, double[] percents, TDigestState state, boolean keyed, @Nullable ValueFormatter formatter, + List reducers, Map metaData) { - super(name, percents, state, keyed, formatter, metaData); + super(name, percents, state, keyed, formatter, reducers, metaData); } @Override @@ -78,8 +81,9 @@ public class InternalPercentiles extends AbstractInternalPercentiles implements } @Override - protected AbstractInternalPercentiles createReduced(String name, double[] keys, TDigestState merged, boolean keyed, Map metaData) { - return new InternalPercentiles(name, keys, merged, keyed, valueFormatter, metaData); + protected AbstractInternalPercentiles createReduced(String name, double[] keys, TDigestState merged, boolean keyed, + List reducers, Map metaData) { + return new InternalPercentiles(name, keys, merged, keyed, valueFormatter, reducers, metaData); } @Override diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregator.java index 0383e33e8a7..380482b8ab3 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksAggregator.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.search.aggregations.Aggregator; import 
org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.TDigestState; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -30,6 +31,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -37,10 +39,10 @@ import java.util.Map; */ public class PercentileRanksAggregator extends AbstractPercentilesAggregator { - public PercentileRanksAggregator(String name, Numeric valuesSource, AggregationContext context, - Aggregator parent, double[] percents, double compression, boolean keyed, @Nullable ValueFormatter formatter, - Map metaData) throws IOException { - super(name, valuesSource, context, parent, percents, compression, keyed, formatter, metaData); + public PercentileRanksAggregator(String name, Numeric valuesSource, AggregationContext context, Aggregator parent, double[] percents, + double compression, boolean keyed, @Nullable ValueFormatter formatter, List reducers, Map metaData) + throws IOException { + super(name, valuesSource, context, parent, percents, compression, keyed, formatter, reducers, metaData); } @Override @@ -49,13 +51,13 @@ public class PercentileRanksAggregator extends AbstractPercentilesAggregator { if (state == null) { return buildEmptyAggregation(); } else { - return new InternalPercentileRanks(name, keys, state, keyed, formatter, metaData()); + return new InternalPercentileRanks(name, keys, state, keyed, formatter, reducers(), metaData()); } } @Override public InternalAggregation buildEmptyAggregation() { - return new InternalPercentileRanks(name, keys, new TDigestState(compression), keyed, formatter, metaData()); + return new InternalPercentileRanks(name, keys, new TDigestState(compression), keyed, formatter, reducers(), metaData()); } @Override @@ -83,15 +85,18 @@ public class PercentileRanksAggregator extends AbstractPercentilesAggregator { } @Override - protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map metaData) throws IOException { + protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List reducers, + Map metaData) throws IOException { return new PercentileRanksAggregator(name, null, aggregationContext, parent, values, compression, keyed, config.formatter(), + reducers, metaData); } @Override - protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map metaData) throws IOException { + protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List reducers, Map metaData) throws IOException { return new PercentileRanksAggregator(name, valuesSource, aggregationContext, parent, values, compression, - keyed, config.formatter(), metaData); + keyed, config.formatter(), reducers, metaData); } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksParser.java 
b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksParser.java index d0dd632314e..7d27c5556da 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentileRanksParser.java @@ -46,7 +46,7 @@ public class PercentileRanksParser extends AbstractPercentilesParser { @Override protected AggregatorFactory buildFactory(SearchContext context, String aggregationName, ValuesSourceConfig valuesSourceConfig, double[] keys, double compression, boolean keyed) { if (keys == null) { - throw new SearchParseException(context, "Missing token values in [" + aggregationName + "]."); + throw new SearchParseException(context, "Missing token values in [" + aggregationName + "].", null); } return new PercentileRanksAggregator.Factory(aggregationName, valuesSourceConfig, keys, compression, keyed); } diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregator.java index 4dd99b73cd9..2a42dc94620 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregator.java +++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/percentiles/PercentilesAggregator.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.inject.internal.Nullable; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.metrics.percentiles.tdigest.TDigestState; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.AggregationContext; import org.elasticsearch.search.aggregations.support.ValuesSource; import org.elasticsearch.search.aggregations.support.ValuesSource.Numeric; @@ -30,6 +31,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig; import org.elasticsearch.search.aggregations.support.format.ValueFormatter; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -38,9 +40,10 @@ import java.util.Map; public class PercentilesAggregator extends AbstractPercentilesAggregator { public PercentilesAggregator(String name, Numeric valuesSource, AggregationContext context, - Aggregator parent, double[] percents, double compression, boolean keyed, @Nullable ValueFormatter formatter, + Aggregator parent, double[] percents, + double compression, boolean keyed, @Nullable ValueFormatter formatter, List reducers, Map metaData) throws IOException { - super(name, valuesSource, context, parent, percents, compression, keyed, formatter, metaData); + super(name, valuesSource, context, parent, percents, compression, keyed, formatter, reducers, metaData); } @Override @@ -49,7 +52,7 @@ public class PercentilesAggregator extends AbstractPercentilesAggregator { if (state == null) { return buildEmptyAggregation(); } else { - return new InternalPercentiles(name, keys, state, keyed, formatter, metaData()); + return new InternalPercentiles(name, keys, state, keyed, formatter, reducers(), metaData()); } } @@ -65,7 +68,7 @@ public class PercentilesAggregator extends AbstractPercentilesAggregator { @Override public InternalAggregation buildEmptyAggregation() { - return new InternalPercentiles(name, keys, new TDigestState(compression), keyed, formatter, metaData()); + return new 
             InternalPercentiles(name, keys, new TDigestState(compression), keyed, formatter, reducers(), metaData());
    }

    public static class Factory extends ValuesSourceAggregatorFactory.LeafOnly<ValuesSource.Numeric> {
@@ -83,15 +86,18 @@ public class PercentilesAggregator extends AbstractPercentilesAggregator {
        }

        @Override
-        protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData) throws IOException {
+        protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List<Reducer> reducers,
+                Map<String, Object> metaData) throws IOException {
            return new PercentilesAggregator(name, null, aggregationContext, parent, percents, compression, keyed, config.formatter(),
+                    reducers, metaData);
        }

        @Override
-        protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException {
+        protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent,
+                boolean collectsFromSingleBucket, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
            return new PercentilesAggregator(name, valuesSource, aggregationContext, parent, percents, compression,
-                    keyed, config.formatter(), metaData);
+                    keyed, config.formatter(), reducers, metaData);
        }
    }
}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java
index a2e03a3b460..4f80befc440 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/InternalScriptedMetric.java
@@ -19,16 +19,17 @@
package org.elasticsearch.search.aggregations.metrics.scripted;

-import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.script.ExecutableScript;
+import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.search.aggregations.AggregationStreams;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.metrics.InternalMetricsAggregation;
+import org.elasticsearch.search.aggregations.reducers.Reducer;

import java.io.IOException;
import java.util.ArrayList;
@@ -62,13 +63,13 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implements ScriptedMetric {
    private InternalScriptedMetric() {
    }

-    private InternalScriptedMetric(String name, Map<String, Object> metaData) {
-        super(name, metaData);
+    private InternalScriptedMetric(String name, List<Reducer> reducers, Map<String, Object> metaData) {
+        super(name, reducers, metaData);
    }

    public InternalScriptedMetric(String name, Object aggregation, String scriptLang, ScriptType scriptType, String reduceScript,
-            Map<String, Object> reduceParams, Map<String, Object> metaData) {
-        this(name, metaData);
+            Map<String, Object> reduceParams, List<Reducer> reducers, Map<String, Object> metaData) {
+        this(name, reducers, metaData);
        this.aggregation = aggregation;
        this.scriptType = scriptType;
        this.reduceScript = reduceScript;
@@ -82,7 +83,7 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implements ScriptedMetric {
    }

    @Override
-    public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+    public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
        List<Object> aggregationObjects = new ArrayList<>();
        for (InternalAggregation aggregation : aggregations) {
            InternalScriptedMetric mapReduceAggregation = (InternalScriptedMetric) aggregation;
@@ -98,14 +99,14 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implements ScriptedMetric {
                params = new HashMap<>();
            }
            params.put("_aggs", aggregationObjects);
-            ExecutableScript script = reduceContext.scriptService().executable(firstAggregation.scriptLang, firstAggregation.reduceScript,
-                    firstAggregation.scriptType, ScriptContext.Standard.AGGS, params);
+            ExecutableScript script = reduceContext.scriptService().executable(new Script(firstAggregation.scriptLang, firstAggregation.reduceScript,
+                    firstAggregation.scriptType, params), ScriptContext.Standard.AGGS);
            aggregation = script.run();
        } else {
            aggregation = aggregationObjects;
        }
        return new InternalScriptedMetric(firstAggregation.getName(), aggregation, firstAggregation.scriptLang, firstAggregation.scriptType,
-                firstAggregation.reduceScript, firstAggregation.reduceParams, getMetaData());
+                firstAggregation.reduceScript, firstAggregation.reduceParams, reducers(), getMetaData());
    }

@@ -121,7 +122,7 @@ public class InternalScriptedMetric extends InternalMetricsAggregation implements ScriptedMetric {
        } else if (path.size() == 1 && "value".equals(path.get(0))) {
            return aggregation;
        } else {
-            throw new ElasticsearchIllegalArgumentException("path not supported for [" + getName() + "]: " + path);
+            throw new IllegalArgumentException("path not supported for [" + getName() + "]: " + path);
        }
    }
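For orientation (illustrative only, not part of the change): the doReduce above collects each shard's `_agg` object into a single `_aggs` list and hands it to the reduce script. A minimal plain-Java sketch of that contract, with made-up shard states and the reduce script's work emulated inline:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ScriptedMetricReduceSketch {
    public static void main(String[] args) {
        // Hypothetical per-shard states produced by the map/combine scripts,
        // e.g. each shard combined its documents into one partial sum.
        List<Object> shardStates = new ArrayList<>();
        shardStates.add(42.0);
        shardStates.add(58.0);

        // The coordinating node exposes the collected states to the reduce
        // script under the "_aggs" key, mirroring doReduce(...) above.
        Map<String, Object> params = new HashMap<>();
        params.put("_aggs", shardStates);

        // A reduce script like "sum = 0; for (a in _aggs) { sum += a }; return sum"
        // would see exactly that list; emulated here in plain Java:
        double total = 0;
        for (Object state : shardStates) {
            total += (Double) state;
        }
        System.out.println(total); // 100.0
    }
}
```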
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java
index 7d89b5994af..6ac07a6fedc 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricAggregator.java
@@ -22,16 +22,19 @@ package org.elasticsearch.search.aggregations.metrics.scripted;
import org.apache.lucene.index.LeafReaderContext;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.LeafSearchScript;
+import org.elasticsearch.script.Script;
+import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
-import org.elasticsearch.script.*;
import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.script.SearchScript;
import org.elasticsearch.search.SearchParseException;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactory;
-import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
+import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
import org.elasticsearch.search.aggregations.metrics.MetricsAggregator;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.internal.SearchContext;
@@ -57,8 +60,9 @@ public class ScriptedMetricAggregator extends MetricsAggregator {
    protected ScriptedMetricAggregator(String name, String scriptLang, ScriptType initScriptType, String initScript, ScriptType mapScriptType, String mapScript,
            ScriptType combineScriptType, String combineScript, ScriptType reduceScriptType,
-            String reduceScript, Map<String, Object> params, Map<String, Object> reduceParams, AggregationContext context, Aggregator parent, Map<String, Object> metaData) throws IOException {
-        super(name, context, parent, metaData);
+            String reduceScript, Map<String, Object> params, Map<String, Object> reduceParams, AggregationContext context,
+            Aggregator parent, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+        super(name, context, parent, reducers, metaData);
        this.scriptLang = scriptLang;
        this.reduceScriptType = reduceScriptType;
        if (params == null) {
@@ -74,11 +78,11 @@ public class ScriptedMetricAggregator extends MetricsAggregator {
        }
        ScriptService scriptService = context.searchContext().scriptService();
        if (initScript != null) {
-            scriptService.executable(scriptLang, initScript, initScriptType, ScriptContext.Standard.AGGS, this.params).run();
+            scriptService.executable(new Script(scriptLang, initScript, initScriptType, this.params), ScriptContext.Standard.AGGS).run();
        }
-        this.mapScript = scriptService.search(context.searchContext().lookup(), scriptLang, mapScript, mapScriptType, ScriptContext.Standard.AGGS, this.params);
+        this.mapScript = scriptService.search(context.searchContext().lookup(), new Script(scriptLang, mapScript, mapScriptType, this.params), ScriptContext.Standard.AGGS);
        if (combineScript != null) {
-            this.combineScript = scriptService.executable(scriptLang, combineScript, combineScriptType, ScriptContext.Standard.AGGS, this.params);
+            this.combineScript = scriptService.executable(new Script(scriptLang, combineScript, combineScriptType, this.params), ScriptContext.Standard.AGGS);
        } else {
            this.combineScript = null;
        }
@@ -112,12 +116,13 @@ public class ScriptedMetricAggregator extends MetricsAggregator {
        } else {
            aggregation = params.get("_agg");
        }
-        return new InternalScriptedMetric(name, aggregation, scriptLang, reduceScriptType, reduceScript, reduceParams, metaData());
+        return new InternalScriptedMetric(name, aggregation, scriptLang, reduceScriptType, reduceScript, reduceParams, reducers(),
+                metaData());
    }

    @Override
    public InternalAggregation buildEmptyAggregation() {
-        return new InternalScriptedMetric(name, null, scriptLang, reduceScriptType, reduceScript, reduceParams, metaData());
+        return new InternalScriptedMetric(name, null, scriptLang, reduceScriptType, reduceScript, reduceParams, reducers(), metaData());
    }

    public static class Factory extends AggregatorFactory {
@@ -151,7 +156,8 @@ public class ScriptedMetricAggregator extends MetricsAggregator {
        }

        @Override
-        public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException {
+        public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket,
+                List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
            if (collectsFromSingleBucket == false) {
                return asMultiBucketAggregator(this, context, parent);
            }
@@ -164,7 +170,7 @@ public class ScriptedMetricAggregator extends MetricsAggregator {
                reduceParams = deepCopyParams(this.reduceParams, context.searchContext());
            }
            return new ScriptedMetricAggregator(name, scriptLang, initScriptType, initScript, mapScriptType, mapScript, combineScriptType,
-                    combineScript, reduceScriptType, reduceScript, params, reduceParams, context, parent, metaData);
+                    combineScript, reduceScriptType, reduceScript, params, reduceParams, context, parent, reducers, metaData);
        }

        @SuppressWarnings({ "unchecked" })
@@ -190,7 +196,7 @@ public class ScriptedMetricAggregator extends MetricsAggregator {
                clone = original;
            } else {
                throw new SearchParseException(context, "Can only clone primitives, String, ArrayList, and HashMap. Found: "
-                        + original.getClass().getCanonicalName());
+                        + original.getClass().getCanonicalName(), null);
            }
            return clone;
        }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricParser.java
index 1b0b5aa3290..c37d0aaccf8 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricParser.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/scripted/ScriptedMetricParser.java
@@ -72,14 +72,17 @@ public class ScriptedMetricParser implements Aggregator.Parser {
                } else if (REDUCE_PARAMS_FIELD.match(currentFieldName)) {
                    reduceParams = parser.map();
                } else {
-                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: ["
+                            + currentFieldName + "].", parser.getTokenLocation());
                }
            } else if (token.isValue()) {
                if (!scriptParameterParser.token(currentFieldName, token, parser)) {
-                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: ["
+                            + currentFieldName + "].", parser.getTokenLocation());
                }
            } else {
-                throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+                throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].",
+                        parser.getTokenLocation());
            }
        }
@@ -114,7 +117,7 @@ public class ScriptedMetricParser implements Aggregator.Parser {
        scriptLang = scriptParameterParser.lang();

        if (mapScript == null) {
-            throw new SearchParseException(context, "map_script field is required in [" + aggregationName + "].");
+            throw new SearchParseException(context, "map_script field is required in [" + aggregationName + "].", parser.getTokenLocation());
        }
        return new ScriptedMetricAggregator.Factory(aggregationName, scriptLang, initScriptType, initScript, mapScriptType, mapScript,
                combineScriptType, combineScript, reduceScriptType, reduceScript, params, reduceParams);
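For reviewers who want to exercise this parser end to end: a hedged client-side sketch, assuming the contemporaneous AggregationBuilders.scriptedMetric builder and its script-source setters; the index name "sales" and field "amount" are hypothetical. If the builder API differs, treat this as pseudocode:

```java
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.search.aggregations.AggregationBuilders;

public class ScriptedMetricRequestSketch {
    // Builds a scripted_metric aggregation covering all four script phases
    // parsed by ScriptedMetricParser above.
    public static SearchResponse profit(Client client) {
        return client.prepareSearch("sales")
                .addAggregation(AggregationBuilders.scriptedMetric("profit")
                        .initScript("_agg['values'] = []")
                        .mapScript("_agg.values.add(doc['amount'].value)")
                        .combineScript("sum = 0; for (v in _agg.values) { sum += v }; return sum")
                        .reduceScript("total = 0; for (a in _aggs) { total += a }; return total"))
                .get();
    }
}
```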
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java
index 684dae5afdf..c7b68da9a88 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/InternalStats.java
@@ -26,6 +26,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.search.aggregations.AggregationStreams;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
import org.elasticsearch.search.aggregations.support.format.ValueFormatter;
import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams;
@@ -70,8 +71,9 @@ public class InternalStats extends InternalNumericMetricsAggregation.MultiValue implements Stats {
    protected InternalStats() {} // for serialization

    public InternalStats(String name, long count, double sum, double min, double max, @Nullable ValueFormatter formatter,
+            List<Reducer> reducers,
            Map<String, Object> metaData) {
-        super(name, metaData);
+        super(name, reducers, metaData);
        this.count = count;
        this.sum = sum;
        this.min = min;
@@ -149,7 +151,7 @@ public class InternalStats extends InternalNumericMetricsAggregation.MultiValue implements Stats {
    }

    @Override
-    public InternalStats reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+    public InternalStats doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
        long count = 0;
        double min = Double.POSITIVE_INFINITY;
        double max = Double.NEGATIVE_INFINITY;
@@ -161,7 +163,7 @@ public class InternalStats extends InternalNumericMetricsAggregation.MultiValue implements Stats {
            max = Math.max(max, stats.getMax());
            sum += stats.getSum();
        }
-        return new InternalStats(name, count, sum, min, max, valueFormatter, getMetaData());
+        return new InternalStats(name, count, sum, min, max, valueFormatter, reducers(), getMetaData());
    }

    @Override
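The merge in doReduce is the standard one for stats: counts and sums add, min/max fold. A self-contained sketch of the same arithmetic (Partial is an illustrative stand-in for InternalStats; all values made up):

```java
import java.util.Arrays;
import java.util.List;

public class StatsMergeSketch {

    /** A shard-local partial result, mirroring InternalStats' fields. */
    static class Partial {
        final long count;
        final double sum, min, max;
        Partial(long count, double sum, double min, double max) {
            this.count = count; this.sum = sum; this.min = min; this.max = max;
        }
    }

    /** Merges partials the same way InternalStats#doReduce does. */
    static Partial merge(List<Partial> partials) {
        long count = 0;
        double sum = 0;
        double min = Double.POSITIVE_INFINITY;
        double max = Double.NEGATIVE_INFINITY;
        for (Partial p : partials) {
            count += p.count;
            sum += p.sum;
            min = Math.min(min, p.min);
            max = Math.max(max, p.max);
        }
        return new Partial(count, sum, min, max);
    }

    public static void main(String[] args) {
        Partial m = merge(Arrays.asList(new Partial(3, 6.0, 1.0, 3.0), new Partial(2, 9.0, 4.0, 5.0)));
        // count=5 sum=15.0 min=1.0 max=5.0, and avg falls out as sum/count = 3.0
        System.out.println(m.count + " " + m.sum + " " + m.min + " " + m.max + " " + (m.sum / m.count));
    }
}
```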
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java
index 8f431578fef..cf9356fee1e 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/StatsAggegator.java
@@ -19,7 +19,6 @@
package org.elasticsearch.search.aggregations.metrics.stats;

import org.apache.lucene.index.LeafReaderContext;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.BigArrays;
@@ -31,6 +30,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
@@ -38,6 +38,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.format.ValueFormatter;

import java.io.IOException;
+import java.util.List;
import java.util.Map;

/**
@@ -55,8 +56,9 @@ public class StatsAggegator extends NumericMetricsAggregator.MultiValue {

    public StatsAggegator(String name, ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter,
-            AggregationContext context, Aggregator parent, Map<String, Object> metaData) throws IOException {
-        super(name, context, parent, metaData);
+            AggregationContext context, Aggregator parent, List<Reducer> reducers,
+            Map<String, Object> metaData) throws IOException {
+        super(name, context, parent, reducers, metaData);
        this.valuesSource = valuesSource;
        if (valuesSource != null) {
            final BigArrays bigArrays = context.bigArrays();
@@ -135,7 +137,7 @@ public class StatsAggegator extends NumericMetricsAggregator.MultiValue {
            case max: return valuesSource == null ? Double.NEGATIVE_INFINITY : maxes.get(owningBucketOrd);
            case avg: return valuesSource == null ? Double.NaN : sums.get(owningBucketOrd) / counts.get(owningBucketOrd);
            default:
-                throw new ElasticsearchIllegalArgumentException("Unknown value [" + name + "] in common stats aggregation");
+                throw new IllegalArgumentException("Unknown value [" + name + "] in common stats aggregation");
        }
    }
@@ -145,12 +147,12 @@ public class StatsAggegator extends NumericMetricsAggregator.MultiValue {
            return buildEmptyAggregation();
        }
        return new InternalStats(name, counts.get(bucket), sums.get(bucket), mins.get(bucket),
-                maxes.get(bucket), formatter, metaData());
+                maxes.get(bucket), formatter, reducers(), metaData());
    }

    @Override
    public InternalAggregation buildEmptyAggregation() {
-        return new InternalStats(name, 0, 0, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, formatter, metaData());
+        return new InternalStats(name, 0, 0, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, formatter, reducers(), metaData());
    }

    public static class Factory extends ValuesSourceAggregatorFactory.LeafOnly<ValuesSource.Numeric> {
@@ -160,13 +162,15 @@ public class StatsAggegator extends NumericMetricsAggregator.MultiValue {
        }

        @Override
-        protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData) throws IOException {
-            return new StatsAggegator(name, null, config.formatter(), aggregationContext, parent, metaData);
+        protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List<Reducer> reducers,
+                Map<String, Object> metaData) throws IOException {
+            return new StatsAggegator(name, null, config.formatter(), aggregationContext, parent, reducers, metaData);
        }

        @Override
-        protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException {
-            return new StatsAggegator(name, valuesSource, config.formatter(), aggregationContext, parent, metaData);
+        protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent,
+                boolean collectsFromSingleBucket, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+            return new StatsAggegator(name, valuesSource, config.formatter(), aggregationContext, parent, reducers, metaData);
        }
    }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java
index 75dc354f874..164fb86d00a 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsAggregator.java
@@ -19,7 +19,6 @@
package org.elasticsearch.search.aggregations.metrics.stats.extended;

import org.apache.lucene.index.LeafReaderContext;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.common.inject.internal.Nullable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.util.BigArrays;
@@ -31,6 +30,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
@@ -38,6 +38,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.format.ValueFormatter;

import java.io.IOException;
+import java.util.List;
import java.util.Map;

/**
@@ -55,10 +56,10 @@ public class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue
    DoubleArray maxes;
    DoubleArray sumOfSqrs;

-    public ExtendedStatsAggregator(String name, ValuesSource.Numeric valuesSource,
-            @Nullable ValueFormatter formatter, AggregationContext context,
-            Aggregator parent, double sigma, Map<String, Object> metaData) throws IOException {
-        super(name, context, parent, metaData);
+    public ExtendedStatsAggregator(String name, ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter,
+            AggregationContext context, Aggregator parent, double sigma, List<Reducer> reducers, Map<String, Object> metaData)
+            throws IOException {
+        super(name, context, parent, reducers, metaData);
        this.valuesSource = valuesSource;
        this.formatter = formatter;
        this.sigma = sigma;
@@ -154,7 +155,7 @@ public class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue
            if (valuesSource == null) { return Double.NaN; }
            return (sums.get(owningBucketOrd) / counts.get(owningBucketOrd)) - (Math.sqrt(variance(owningBucketOrd)) * this.sigma);
        default:
-            throw new ElasticsearchIllegalArgumentException("Unknown value [" + name + "] in common stats aggregation");
+            throw new IllegalArgumentException("Unknown value [" + name + "] in common stats aggregation");
        }
    }
@@ -167,16 +168,19 @@ public class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue
    @Override
    public InternalAggregation buildAggregation(long owningBucketOrdinal) {
        if (valuesSource == null) {
-            return new InternalExtendedStats(name, 0, 0d, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d, 0d, formatter, metaData());
+            return new InternalExtendedStats(name, 0, 0d, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d, 0d, formatter,
+                    reducers(), metaData());
        }
        assert owningBucketOrdinal < counts.size();
        return new InternalExtendedStats(name, counts.get(owningBucketOrdinal), sums.get(owningBucketOrdinal),
-                mins.get(owningBucketOrdinal), maxes.get(owningBucketOrdinal), sumOfSqrs.get(owningBucketOrdinal), sigma, formatter, metaData());
+                mins.get(owningBucketOrdinal), maxes.get(owningBucketOrdinal), sumOfSqrs.get(owningBucketOrdinal), sigma, formatter,
+                reducers(), metaData());
    }

    @Override
    public InternalAggregation buildEmptyAggregation() {
-        return new InternalExtendedStats(name, 0, 0d, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d, 0d, formatter, metaData());
+        return new InternalExtendedStats(name, 0, 0d, Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0d, 0d, formatter, reducers(),
+                metaData());
    }

    @Override
@@ -195,13 +199,16 @@ public class ExtendedStatsAggregator extends NumericMetricsAggregator.MultiValue
        }

        @Override
-        protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData) throws IOException {
-            return new ExtendedStatsAggregator(name, null, config.formatter(), aggregationContext, parent, sigma, metaData);
+        protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List<Reducer> reducers,
+                Map<String, Object> metaData) throws IOException {
+            return new ExtendedStatsAggregator(name, null, config.formatter(), aggregationContext, parent, sigma, reducers, metaData);
        }

        @Override
-        protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException {
-            return new ExtendedStatsAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, sigma, metaData);
+        protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent,
+                boolean collectsFromSingleBucket, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+            return new ExtendedStatsAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, sigma, reducers,
+                    metaData);
        }
    }
}
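For context on the sigma value threaded through this aggregator: extended_stats derives variance from the accumulated (count, sum, sumOfSqrs) triple, and the std_deviation_bounds reported by the metric switch above are avg ± sigma * std_deviation. A small sketch of that arithmetic with made-up numbers:

```java
public class ExtendedStatsMathSketch {
    public static void main(String[] args) {
        // Accumulated per bucket, one addition per document value 1, 2, 3, 4.
        long count = 4;
        double sum = 10.0;       // 1 + 2 + 3 + 4
        double sumOfSqrs = 30.0; // 1 + 4 + 9 + 16
        double sigma = 2.0;      // as parsed by ExtendedStatsParser

        double avg = sum / count;
        // Population variance from the running totals: E[x^2] - E[x]^2.
        double variance = (sumOfSqrs / count) - Math.pow(avg, 2);
        double stdDev = Math.sqrt(variance);

        // The bounds the aggregator reports for std_deviation_bounds.
        double upper = avg + sigma * stdDev;
        double lower = avg - sigma * stdDev;
        System.out.println("avg=" + avg + " variance=" + variance
                + " upper=" + upper + " lower=" + lower); // variance = 1.25
    }
}
```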
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java
index 18ca93495c3..ea48e4b11f8 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/ExtendedStatsParser.java
@@ -65,15 +65,17 @@ public class ExtendedStatsParser implements Aggregator.Parser {
                if (SIGMA.match(currentFieldName)) {
                    sigma = parser.doubleValue();
                } else {
-                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: ["
+                            + currentFieldName + "].", parser.getTokenLocation());
                }
            } else {
-                throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+                throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].",
+                        parser.getTokenLocation());
            }
        }

        if (sigma < 0) {
-            throw new SearchParseException(context, "[sigma] must not be negative. Value provided was" + sigma );
+            throw new SearchParseException(context, "[sigma] must not be negative. Value provided was: " + sigma, parser.getTokenLocation());
        }

        return createFactory(aggregationName, vsParser.config(), sigma);
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java
index 86051ba6320..7d1568156f5 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/stats/extended/InternalExtendedStats.java
@@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.search.aggregations.AggregationStreams;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.metrics.stats.InternalStats;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
import org.elasticsearch.search.aggregations.support.format.ValueFormatter;

import java.io.IOException;
@@ -68,8 +69,8 @@ public class InternalExtendedStats extends InternalStats implements ExtendedStats {
    InternalExtendedStats() {} // for serialization

    public InternalExtendedStats(String name, long count, double sum, double min, double max, double sumOfSqrs,
-            double sigma, @Nullable ValueFormatter formatter, Map<String, Object> metaData) {
-        super(name, count, sum, min, max, formatter, metaData);
+            double sigma, @Nullable ValueFormatter formatter, List<Reducer> reducers, Map<String, Object> metaData) {
+        super(name, count, sum, min, max, formatter, reducers, metaData);
        this.sumOfSqrs = sumOfSqrs;
        this.sigma = sigma;
    }
@@ -144,14 +145,15 @@ public class InternalExtendedStats extends InternalStats implements ExtendedStats {
    }

    @Override
-    public InternalExtendedStats reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+    public InternalExtendedStats doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
        double sumOfSqrs = 0;
        for (InternalAggregation aggregation : aggregations) {
            InternalExtendedStats stats = (InternalExtendedStats) aggregation;
            sumOfSqrs += stats.getSumOfSquares();
        }
-        final InternalStats stats = super.reduce(aggregations, reduceContext);
-        return new InternalExtendedStats(name, stats.getCount(), stats.getSum(), stats.getMin(), stats.getMax(), sumOfSqrs, sigma, valueFormatter, getMetaData());
+        final InternalStats stats = super.doReduce(aggregations, reduceContext);
+        return new InternalExtendedStats(name, stats.getCount(), stats.getSum(), stats.getMin(), stats.getMax(), sumOfSqrs, sigma,
+                valueFormatter, reducers(), getMetaData());
    }

    @Override
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java
index 7eb7e789710..00c8050e714 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/InternalSum.java
@@ -25,6 +25,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.aggregations.AggregationStreams;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
import org.elasticsearch.search.aggregations.support.format.ValueFormatter;
import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams;
@@ -56,8 +57,8 @@ public class InternalSum extends InternalNumericMetricsAggregation.SingleValue implements Sum {
    InternalSum() {} // for serialization

-    InternalSum(String name, double sum, @Nullable ValueFormatter formatter, Map<String, Object> metaData) {
-        super(name, metaData);
+    InternalSum(String name, double sum, @Nullable ValueFormatter formatter, List<Reducer> reducers, Map<String, Object> metaData) {
+        super(name, reducers, metaData);
        this.sum = sum;
        this.valueFormatter = formatter;
    }
@@ -78,12 +79,12 @@ public class InternalSum extends InternalNumericMetricsAggregation.SingleValue implements Sum {
    }

    @Override
-    public InternalSum reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+    public InternalSum doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
        double sum = 0;
        for (InternalAggregation aggregation : aggregations) {
            sum += ((InternalSum) aggregation).sum;
        }
-        return new InternalSum(name, sum, valueFormatter, getMetaData());
+        return new InternalSum(name, sum, valueFormatter, reducers(), getMetaData());
    }

    @Override
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java
index ab6b565a62b..4c7981422f3 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/sum/SumAggregator.java
@@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
@@ -36,6 +37,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.format.ValueFormatter;

import java.io.IOException;
+import java.util.List;
import java.util.Map;

/**
@@ -49,8 +51,9 @@ public class SumAggregator extends NumericMetricsAggregator.SingleValue {
    DoubleArray sums;

    public SumAggregator(String name, ValuesSource.Numeric valuesSource, @Nullable ValueFormatter formatter,
-            AggregationContext context, Aggregator parent, Map<String, Object> metaData) throws IOException {
-        super(name, context, parent, metaData);
+            AggregationContext context, Aggregator parent, List<Reducer> reducers,
+            Map<String, Object> metaData) throws IOException {
+        super(name, context, parent, reducers, metaData);
        this.valuesSource = valuesSource;
        this.formatter = formatter;
        if (valuesSource != null) {
@@ -96,12 +99,12 @@ public class SumAggregator extends NumericMetricsAggregator.SingleValue {
        if (valuesSource == null || bucket >= sums.size()) {
            return buildEmptyAggregation();
        }
-        return new InternalSum(name, sums.get(bucket), formatter, metaData());
+        return new InternalSum(name, sums.get(bucket), formatter, reducers(), metaData());
    }

    @Override
    public InternalAggregation buildEmptyAggregation() {
-        return new InternalSum(name, 0.0, formatter, metaData());
+        return new InternalSum(name, 0.0, formatter, reducers(), metaData());
    }

    public static class Factory extends ValuesSourceAggregatorFactory.LeafOnly<ValuesSource.Numeric> {
@@ -111,13 +114,15 @@ public class SumAggregator extends NumericMetricsAggregator.SingleValue {
        }

        @Override
-        protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData) throws IOException {
-            return new SumAggregator(name, null, config.formatter(), aggregationContext, parent, metaData);
+        protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List<Reducer> reducers,
+                Map<String, Object> metaData) throws IOException {
+            return new SumAggregator(name, null, config.formatter(), aggregationContext, parent, reducers, metaData);
        }

        @Override
-        protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException {
-            return new SumAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, metaData);
+        protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, AggregationContext aggregationContext, Aggregator parent,
+                boolean collectsFromSingleBucket, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+            return new SumAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, reducers, metaData);
        }
    }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java
index b33f8bb092e..2ae1d3d50c9 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/InternalTopHits.java
@@ -22,7 +22,6 @@ import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldDocs;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -32,11 +31,13 @@ import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.AggregationStreams;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.metrics.InternalMetricsAggregation;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHits;

import java.io.IOException;
import java.util.List;
+import java.util.Map;

/**
 */
@@ -65,15 +66,15 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHits {
    InternalTopHits() {
    }

-    public InternalTopHits(String name, int from, int size, TopDocs topDocs, InternalSearchHits searchHits) {
-        this.name = name;
+    public InternalTopHits(String name, int from, int size, TopDocs topDocs, InternalSearchHits searchHits, List<Reducer> reducers,
+            Map<String, Object> metaData) {
+        super(name, reducers, metaData);
        this.from = from;
        this.size = size;
        this.topDocs = topDocs;
        this.searchHits = searchHits;
    }
-
    @Override
    public Type type() {
        return TYPE;
    }
@@ -85,7 +86,7 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHits {
    }

    @Override
-    public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+    public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
        InternalSearchHits[] shardHits = new InternalSearchHits[aggregations.size()];

        final TopDocs reducedTopDocs;
@@ -121,7 +122,9 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHits {
                } while (shardDocs[scoreDoc.shardIndex].scoreDocs[position] != scoreDoc);
                hits[i] = (InternalSearchHit) shardHits[scoreDoc.shardIndex].getAt(position);
            }
-            return new InternalTopHits(name, from, size, reducedTopDocs, new InternalSearchHits(hits, reducedTopDocs.totalHits, reducedTopDocs.getMaxScore()));
+            return new InternalTopHits(name, from, size, reducedTopDocs, new InternalSearchHits(hits, reducedTopDocs.totalHits,
+                    reducedTopDocs.getMaxScore()),
+                    reducers(), getMetaData());
        } catch (IOException e) {
            throw ExceptionsHelper.convertToElastic(e);
        }
@@ -132,7 +135,7 @@ public class InternalTopHits extends InternalMetricsAggregation implements TopHits {
        if (path.isEmpty()) {
            return this;
        } else {
-            throw new ElasticsearchIllegalArgumentException("path not supported for [" + getName() + "]: " + path);
+            throw new IllegalArgumentException("path not supported for [" + getName() + "]: " + path);
        }
    }
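The doReduce above leans on Lucene's TopDocs.merge to interleave per-shard hits into one globally sorted list, then maps each merged ScoreDoc back to its originating shard via shardIndex. A standalone illustration of that primitive, assuming the Lucene 4/5-era merge(Sort, int, TopDocs[]) signature (a null sort merges by score; doc ids and scores are made up):

```java
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;

public class TopDocsMergeSketch {
    public static void main(String[] args) throws Exception {
        // Two hypothetical shard results, each already sorted by score.
        TopDocs shard0 = new TopDocs(2, new ScoreDoc[] {
                new ScoreDoc(0, 3.0f), new ScoreDoc(5, 1.0f) }, 3.0f);
        TopDocs shard1 = new TopDocs(2, new ScoreDoc[] {
                new ScoreDoc(2, 2.5f), new ScoreDoc(7, 0.5f) }, 2.5f);

        // Lucene interleaves the shard arrays into one globally sorted list and
        // records each hit's shardIndex, which doReduce uses to look the hit up
        // in the matching shard's InternalSearchHits.
        TopDocs merged = TopDocs.merge(null, 3, new TopDocs[] { shard0, shard1 });
        for (ScoreDoc doc : merged.scoreDocs) {
            System.out.println("doc=" + doc.doc + " score=" + doc.score + " shard=" + doc.shardIndex);
        }
    }
}
```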
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java
index 52b2c233cde..1f211471457 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsAggregator.java
@@ -37,10 +37,12 @@ import org.elasticsearch.search.aggregations.AggregationInitializationException;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.aggregations.AggregatorFactory;
+import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.metrics.MetricsAggregator;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.fetch.FetchSearchResult;
@@ -49,6 +51,7 @@ import org.elasticsearch.search.internal.InternalSearchHits;
import org.elasticsearch.search.internal.SubSearchContext;

import java.io.IOException;
+import java.util.List;
import java.util.Map;

/**
@@ -69,8 +72,9 @@ public class TopHitsAggregator extends MetricsAggregator {
    final SubSearchContext subSearchContext;
    final LongObjectPagedHashMap topDocsCollectors;

-    public TopHitsAggregator(FetchPhase fetchPhase, SubSearchContext subSearchContext, String name, AggregationContext context, Aggregator parent, Map<String, Object> metaData) throws IOException {
-        super(name, context, parent, metaData);
+    public TopHitsAggregator(FetchPhase fetchPhase, SubSearchContext subSearchContext, String name, AggregationContext context,
+            Aggregator parent, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+        super(name, context, parent, reducers, metaData);
        this.fetchPhase = fetchPhase;
        topDocsCollectors = new LongObjectPagedHashMap<>(1, context.bigArrays());
        this.subSearchContext = subSearchContext;
@@ -153,7 +157,8 @@ public class TopHitsAggregator extends MetricsAggregator {
                    searchHitFields.sortValues(fieldDoc.fields);
                }
            }
-            topHits = new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), topDocs, fetchResult.hits());
+            topHits = new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), topDocs, fetchResult.hits(), reducers(),
+                    metaData());
        }
        return topHits;
    }
@@ -166,7 +171,7 @@ public class TopHitsAggregator extends MetricsAggregator {
        } else {
            topDocs = Lucene.EMPTY_TOP_DOCS;
        }
-        return new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), topDocs, InternalSearchHits.empty());
+        return new InternalTopHits(name, subSearchContext.from(), subSearchContext.size(), topDocs, InternalSearchHits.empty(), reducers(), metaData());
    }

    @Override
@@ -186,8 +191,9 @@ public class TopHitsAggregator extends MetricsAggregator {
    }

    @Override
-    public Aggregator createInternal(AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException {
-        return new TopHitsAggregator(fetchPhase, subSearchContext, name, aggregationContext, parent, metaData);
+    public Aggregator createInternal(AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket,
+            List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+        return new TopHitsAggregator(fetchPhase, subSearchContext, name, aggregationContext, parent, reducers, metaData);
    }

    @Override
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsParser.java
index 6300374663b..206587ac6a4 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsParser.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/tophits/TopHitsParser.java
@@ -94,7 +94,8 @@ public class TopHitsParser implements Aggregator.Parser {
                        subSearchContext.explain(parser.booleanValue());
                        break;
                    default:
-                        throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+                        throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: ["
+                                + currentFieldName + "].", parser.getTokenLocation());
                    }
                } else if (token == XContentParser.Token.START_OBJECT) {
                    switch (currentFieldName) {
@@ -106,7 +107,8 @@ public class TopHitsParser implements Aggregator.Parser {
                        scriptFieldsParseElement.parse(parser, subSearchContext);
                        break;
                    default:
-                        throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+                        throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: ["
+                                + currentFieldName + "].", parser.getTokenLocation());
                    }
                } else if (token == XContentParser.Token.START_ARRAY) {
                    switch (currentFieldName) {
@@ -115,10 +117,12 @@ public class TopHitsParser implements Aggregator.Parser {
                        fieldDataFieldsParseElement.parse(parser, subSearchContext);
                        break;
                    default:
-                        throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: [" + currentFieldName + "].");
+                        throw new SearchParseException(context, "Unknown key for a " + token + " in [" + aggregationName + "]: ["
+                                + currentFieldName + "].", parser.getTokenLocation());
                    }
                } else {
-                    throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+                    throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].",
+                            parser.getTokenLocation());
                }
            }
        } catch (Exception e) {
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java
index 8c6dbf9b038..9ebb7e534ea 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/InternalValueCount.java
@@ -25,6 +25,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.search.aggregations.AggregationStreams;
import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
import org.elasticsearch.search.aggregations.support.format.ValueFormatter;

import java.io.IOException;
@@ -55,8 +56,9 @@ public class InternalValueCount extends InternalNumericMetricsAggregation.SingleValue implements ValueCount {
    InternalValueCount() {} // for serialization

-    public InternalValueCount(String name, long value, @Nullable ValueFormatter formatter, Map<String, Object> metaData) {
-        super(name, metaData);
+    public InternalValueCount(String name, long value, @Nullable ValueFormatter formatter, List<Reducer> reducers,
+            Map<String, Object> metaData) {
+        super(name, reducers, metaData);
        this.value = value;
        this.valueFormatter = formatter;
    }
@@ -77,12 +79,12 @@ public class InternalValueCount extends InternalNumericMetricsAggregation.SingleValue implements ValueCount {
    }

    @Override
-    public InternalAggregation reduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+    public InternalAggregation doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
        long valueCount = 0;
        for (InternalAggregation aggregation : aggregations) {
            valueCount += ((InternalValueCount) aggregation).value;
        }
-        return new InternalValueCount(name, valueCount, valueFormatter, getMetaData());
+        return new InternalValueCount(name, valueCount, valueFormatter, reducers(), getMetaData());
    }

    @Override
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java
index a74ec061b8e..fedd7e09a2b 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountAggregator.java
@@ -29,6 +29,7 @@ import org.elasticsearch.search.aggregations.InternalAggregation;
import org.elasticsearch.search.aggregations.LeafBucketCollector;
import org.elasticsearch.search.aggregations.LeafBucketCollectorBase;
import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregator;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
import org.elasticsearch.search.aggregations.support.AggregationContext;
import org.elasticsearch.search.aggregations.support.ValuesSource;
import org.elasticsearch.search.aggregations.support.ValuesSourceAggregatorFactory;
@@ -36,6 +37,7 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceConfig;
import org.elasticsearch.search.aggregations.support.format.ValueFormatter;

import java.io.IOException;
+import java.util.List;
import java.util.Map;

/**
@@ -53,8 +55,9 @@ public class ValueCountAggregator extends NumericMetricsAggregator.SingleValue {
    LongArray counts;

    public ValueCountAggregator(String name, ValuesSource valuesSource, @Nullable ValueFormatter formatter,
-            AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData) throws IOException {
-        super(name, aggregationContext, parent, metaData);
+            AggregationContext aggregationContext, Aggregator parent, List<Reducer> reducers, Map<String, Object> metaData)
+            throws IOException {
+        super(name, aggregationContext, parent, reducers, metaData);
        this.valuesSource = valuesSource;
        this.formatter = formatter;
        if (valuesSource != null) {
@@ -92,12 +95,12 @@ public class ValueCountAggregator extends NumericMetricsAggregator.SingleValue {
        if (valuesSource == null || bucket >= counts.size()) {
            return buildEmptyAggregation();
        }
-        return new InternalValueCount(name, counts.get(bucket), formatter, metaData());
+        return new InternalValueCount(name, counts.get(bucket), formatter, reducers(), metaData());
    }

    @Override
    public InternalAggregation buildEmptyAggregation() {
-        return new InternalValueCount(name, 0L, formatter, metaData());
+        return new InternalValueCount(name, 0L, formatter, reducers(), metaData());
    }

    @Override
@@ -112,13 +115,15 @@ public class ValueCountAggregator extends NumericMetricsAggregator.SingleValue {
        }

        @Override
-        protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData) throws IOException {
-            return new ValueCountAggregator(name, null, config.formatter(), aggregationContext, parent, metaData);
+        protected Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List<Reducer> reducers,
+                Map<String, Object> metaData) throws IOException {
+            return new ValueCountAggregator(name, null, config.formatter(), aggregationContext, parent, reducers, metaData);
        }

        @Override
-        protected Aggregator doCreateInternal(VS valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException {
-            return new ValueCountAggregator(name, valuesSource, config.formatter(), aggregationContext, parent,
+        protected Aggregator doCreateInternal(VS valuesSource, AggregationContext aggregationContext, Aggregator parent,
+                boolean collectsFromSingleBucket, List<Reducer> reducers, Map<String, Object> metaData) throws IOException {
+            return new ValueCountAggregator(name, valuesSource, config.formatter(), aggregationContext, parent, reducers,
                    metaData);
        }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java
index fb1d31f41f0..764f6ce9384 100644
--- a/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/metrics/valuecount/ValueCountParser.java
@@ -49,7 +49,8 @@ public class ValueCountParser implements Aggregator.Parser {
            if (token == XContentParser.Token.FIELD_NAME) {
                currentFieldName = parser.currentName();
            } else if (!vsParser.token(currentFieldName, token, parser)) {
-                throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].");
+                throw new SearchParseException(context, "Unexpected token " + token + " in [" + aggregationName + "].",
+                        parser.getTokenLocation());
            }
        }
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java
new file mode 100644
index 00000000000..9eb70d860e5
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/BucketHelpers.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.reducers;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentLocation;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.aggregations.AggregationExecutionException;
+import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
+import org.elasticsearch.search.aggregations.InvalidAggregationPathException;
+import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation;
+import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeParser;
+import org.elasticsearch.search.aggregations.support.AggregationPath;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * A set of static helpers to simplify working with aggregation buckets, in particular
+ * providing utilities that help reducers.
+ */
+public class BucketHelpers {
+
+    /**
+     * A gap policy determines how "holes" in a set of buckets should be handled. For example,
+     * a date_histogram might have empty buckets due to no data existing for that time interval.
+     * This can cause problems for operations like a derivative, which relies on a continuous
+     * function.
+     *
+     * "insert_zeros": empty buckets will be filled with zeros for all metrics
+     * "skip": empty buckets will simply be skipped
+     */
+    public static enum GapPolicy {
+        INSERT_ZEROS((byte) 0, "insert_zeros"), SKIP((byte) 1, "skip");
+
+        /**
+         * Parse a string GapPolicy into the byte enum
+         *
+         * @param context SearchContext this is taking place in
+         * @param text GapPolicy in string format (e.g. "skip")
+         * @param tokenLocation location of the token being parsed, used for error reporting
+         * @return GapPolicy enum
+         */
+        public static GapPolicy parse(SearchContext context, String text, XContentLocation tokenLocation) {
+            GapPolicy result = null;
+            for (GapPolicy policy : values()) {
+                if (policy.parseField.match(text)) {
+                    if (result == null) {
+                        result = policy;
+                    } else {
+                        throw new IllegalStateException("Text can be parsed to 2 different gap policies: text=[" + text
+                                + "], " + "policies=" + Arrays.asList(result, policy));
+                    }
+                }
+            }
+            if (result == null) {
+                final List<String> validNames = new ArrayList<>();
+                for (GapPolicy policy : values()) {
+                    validNames.add(policy.getName());
+                }
+                throw new SearchParseException(context, "Invalid gap policy: [" + text + "], accepted values: " + validNames, tokenLocation);
+            }
+            return result;
+        }
+
+        private final byte id;
+        private final ParseField parseField;
+
+        private GapPolicy(byte id, String name) {
+            this.id = id;
+            this.parseField = new ParseField(name);
+        }
+
+        /**
+         * Serialize the GapPolicy to the output stream
+         *
+         * @param out the output stream to write to
+         * @throws IOException on failure to write
+         */
+        public void writeTo(StreamOutput out) throws IOException {
+            out.writeByte(id);
+        }
+
+        /**
+         * Deserialize the GapPolicy from the input stream
+         *
+         * @param in the input stream to read from
+         * @return GapPolicy enum
+         * @throws IOException on failure to read
+         */
+        public static GapPolicy readFrom(StreamInput in) throws IOException {
+            byte id = in.readByte();
+            for (GapPolicy gapPolicy : values()) {
+                if (id == gapPolicy.id) {
+                    return gapPolicy;
+                }
+            }
+            throw new IllegalStateException("Unknown GapPolicy with id [" + id + "]");
+        }
+
+        /**
+         * Return the English-formatted name of the GapPolicy
+         *
+         * @return English representation of GapPolicy
+         */
+        public String getName() {
+            return parseField.getPreferredName();
+        }
+    }
+
+    /**
+     * Given a path and a set of buckets, this method will return the value inside the agg at
+     * that path. This is used to extract values for use by reducers (e.g. a derivative might need
+     * the price for each bucket). If the bucket is empty, the configured GapPolicy is invoked to
+     * resolve the missing bucket.
+     *
+     * @param agg A series of agg buckets in the form of a histogram
+     * @param bucket A specific bucket that a value needs to be extracted from. This bucket should be present
+     *            in the agg parameter
+     * @param aggPath The path to a particular value that needs to be extracted. This path should point to a metric
+     *            inside the bucket
+     * @param gapPolicy The gap policy to apply if empty buckets are found
+     * @return The value extracted from bucket found at aggPath
+     */
+    public static Double resolveBucketValue(InternalMultiBucketAggregation agg,
+            InternalMultiBucketAggregation.Bucket bucket, String aggPath, GapPolicy gapPolicy) {
+        List<String> aggPathsList = AggregationPath.parse(aggPath).getPathElementsAsStringList();
+        return resolveBucketValue(agg, bucket, aggPathsList, gapPolicy);
+    }
+
+    public static Double resolveBucketValue(InternalMultiBucketAggregation agg,
+            InternalMultiBucketAggregation.Bucket bucket, List<String> aggPathAsList, GapPolicy gapPolicy) {
+        try {
+            Object propertyValue = bucket.getProperty(agg.getName(), aggPathAsList);
+            if (propertyValue == null) {
+                throw new AggregationExecutionException(DerivativeParser.BUCKETS_PATH.getPreferredName()
+                        + " must reference either a number value or a single value numeric metric aggregation");
+            } else {
+                double value;
+                if (propertyValue instanceof Number) {
+                    value = ((Number) propertyValue).doubleValue();
+                } else if (propertyValue instanceof InternalNumericMetricsAggregation.SingleValue) {
+                    value = ((InternalNumericMetricsAggregation.SingleValue) propertyValue).value();
+                } else {
+                    throw new AggregationExecutionException(DerivativeParser.BUCKETS_PATH.getPreferredName()
+                            + " must reference either a number value or a single value numeric metric aggregation");
+                }
+                // doc count never has missing values so gap policy doesn't apply here
+                boolean isDocCountProperty = aggPathAsList.size() == 1 && "_count".equals(aggPathAsList.get(0));
+                if (Double.isInfinite(value) || Double.isNaN(value) || (bucket.getDocCount() == 0 && !isDocCountProperty)) {
+                    switch (gapPolicy) {
+                        case INSERT_ZEROS:
+                            return 0.0;
+                        case SKIP:
+                        default:
+                            return Double.NaN;
+                    }
+                } else {
+                    return value;
+                }
+            }
+        } catch (InvalidAggregationPathException e) {
+            return null;
+        }
+    }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/InternalSimpleValue.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/InternalSimpleValue.java
new file mode 100644
index 00000000000..2106f3247e1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/InternalSimpleValue.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.reducers;
+
+import org.elasticsearch.common.inject.internal.Nullable;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AggregationStreams;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation;
+import org.elasticsearch.search.aggregations.support.format.ValueFormatter;
+import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+public class InternalSimpleValue extends InternalNumericMetricsAggregation.SingleValue implements SimpleValue {
+
+    public final static Type TYPE = new Type("simple_value");
+
+    public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() {
+        @Override
+        public InternalSimpleValue readResult(StreamInput in) throws IOException {
+            InternalSimpleValue result = new InternalSimpleValue();
+            result.readFrom(in);
+            return result;
+        }
+    };
+
+    public static void registerStreams() {
+        AggregationStreams.registerStream(STREAM, TYPE.stream());
+    }
+
+    private double value;
+
+    InternalSimpleValue() {} // for serialization
+
+    public InternalSimpleValue(String name, double value, @Nullable ValueFormatter formatter, List<Reducer> reducers,
+            Map<String, Object> metaData) {
+        super(name, reducers, metaData);
+        this.valueFormatter = formatter;
+        this.value = value;
+    }
+
+    @Override
+    public double value() {
+        return value;
+    }
+
+    public double getValue() {
+        return value;
+    }
+
+    @Override
+    public Type type() {
+        return TYPE;
+    }
+
+    @Override
+    public InternalSimpleValue doReduce(List<InternalAggregation> aggregations, ReduceContext reduceContext) {
+        throw new UnsupportedOperationException("Not supported");
+    }
+
+    @Override
+    protected void doReadFrom(StreamInput in) throws IOException {
+        valueFormatter = ValueFormatterStreams.readOptional(in);
+        value = in.readDouble();
+    }
+
+    @Override
+    protected void doWriteTo(StreamOutput out) throws IOException {
+        ValueFormatterStreams.writeOptional(valueFormatter, out);
+        out.writeDouble(value);
+    }
+
+    @Override
+    public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException {
+        boolean hasValue = !(Double.isInfinite(value) || Double.isNaN(value));
+        builder.field(CommonFields.VALUE, hasValue ? value : null);
+        if (hasValue && valueFormatter != null) {
+            builder.field(CommonFields.VALUE_AS_STRING, valueFormatter.format(value));
+        }
+        return builder;
+    }
+}
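A note on where simple_value fits: reducers that compute one number per bucket (a derivative, for instance) are expected to wrap each result in an InternalSimpleValue and attach it to that bucket's aggregations; NaN results (a skipped gap) then render as a JSON null via doXContentBody above. A rough sketch of that flow with made-up metric values; the constructor call in the comment mirrors the one defined above:

```java
import java.util.ArrayList;
import java.util.List;

public class SimpleValueUsageSketch {
    public static void main(String[] args) {
        // Metric values resolved per bucket (see BucketHelpers above).
        double[] metric = {10.0, 12.0, 15.0};

        // A derivative-style reducer emits one value per bucket after the first;
        // in the real code each difference would be wrapped as
        // new InternalSimpleValue(name, diff, formatter, reducers, metaData)
        // and added to the bucket's aggregation list.
        List<Double> derivatives = new ArrayList<>();
        for (int i = 1; i < metric.length; i++) {
            derivatives.add(metric[i] - metric[i - 1]);
        }
        System.out.println(derivatives); // [2.0, 3.0]
    }
}
```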
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.reducers;
+
+import com.google.common.base.Function;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.aggregations.Aggregation;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
+import org.elasticsearch.search.aggregations.InternalAggregation.Type;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.Map;
+
+public abstract class Reducer implements Streamable {
+
+    /**
+     * Parses the reducer request and creates the appropriate reducer factory
+     * for it.
+     *
+     * @see ReducerFactory
+     */
+    public static interface Parser {
+
+        public static final ParseField BUCKETS_PATH = new ParseField("buckets_path");
+
+        public static final ParseField FORMAT = new ParseField("format");
+        public static final ParseField GAP_POLICY = new ParseField("gap_policy");
+
+        /**
+         * @return The reducer type this parser is associated with.
+         */
+        String type();
+
+        /**
+         * Returns the reducer factory with which this parser is associated.
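+         * <p>
+         * For orientation only, a sketch of the JSON body such a parser consumes
+         * (the reducer name and values here are hypothetical; the keys come from
+         * the {@code ParseField} constants declared above, and "derivative" names
+         * the reducer type being parsed):
+         * <pre>
+         * "my_deriv" : {
+         *     "derivative" : {
+         *         "buckets_path" : "sales",
+         *         "gap_policy" : "skip"
+         *     }
+         * }
+         * </pre>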
+         *
+         * @param reducerName
+         *            The name of the reducer
+         * @param parser
+         *            The xcontent parser
+         * @param context
+         *            The search context
+         * @return The resolved reducer factory
+         * @throws java.io.IOException
+         *             When parsing fails
+         */
+        ReducerFactory parse(String reducerName, XContentParser parser, SearchContext context) throws IOException;
+
+    }
+
+    public static final Function<Aggregation, InternalAggregation> AGGREGATION_TRANFORM_FUNCTION = new Function<Aggregation, InternalAggregation>() {
+        @Override
+        public InternalAggregation apply(Aggregation input) {
+            return (InternalAggregation) input;
+        }
+    };
+
+    private String name;
+    private String[] bucketsPaths;
+    private Map<String, Object> metaData;
+
+    protected Reducer() { // for Serialisation
+    }
+
+    protected Reducer(String name, String[] bucketsPaths, Map<String, Object> metaData) {
+        this.name = name;
+        this.bucketsPaths = bucketsPaths;
+        this.metaData = metaData;
+    }
+
+    public String name() {
+        return name;
+    }
+
+    public String[] bucketsPaths() {
+        return bucketsPaths;
+    }
+
+    public Map<String, Object> metaData() {
+        return metaData;
+    }
+
+    public abstract Type type();
+
+    public abstract InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext);
+
+    @Override
+    public final void writeTo(StreamOutput out) throws IOException {
+        out.writeString(name);
+        out.writeStringArray(bucketsPaths);
+        out.writeMap(metaData);
+        doWriteTo(out);
+    }
+
+    protected abstract void doWriteTo(StreamOutput out) throws IOException;
+
+    @Override
+    public final void readFrom(StreamInput in) throws IOException {
+        name = in.readString();
+        bucketsPaths = in.readStringArray();
+        metaData = in.readMap();
+        doReadFrom(in);
+    }
+
+    protected abstract void doReadFrom(StreamInput in) throws IOException;
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilder.java
new file mode 100644
index 00000000000..4dee8ea96a2
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilder.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.reducers;
+
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.AbstractAggregationBuilder;
+
+import java.io.IOException;
+import java.util.Map;
+
+/**
+ * A base class for all reducer builders.
+ */
+public abstract class ReducerBuilder<B extends ReducerBuilder<B>> extends AbstractAggregationBuilder {
+
+    private String[] bucketsPaths;
+    private Map<String, Object> metaData;
+
+    /**
+     * Sole constructor, typically used by sub-classes.
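+     * <p>
+     * For illustration, a builder is usually chained and then attached to a
+     * search request. The index, aggregation names and buckets path below are
+     * hypothetical, and the wiring assumes the client-side request builder
+     * accepts any {@link AbstractAggregationBuilder}:
+     * <pre>
+     * client.prepareSearch("sales_index")
+     *         .addAggregation(AggregationBuilders.histogram("histo")
+     *                 .field("timestamp").interval(100)
+     *                 .subAggregation(AggregationBuilders.sum("sales").field("price")))
+     *         .addAggregation(ReducerBuilders.maxBucket("max_sales")
+     *                 .setBucketsPaths("histo>sales"));
+     * </pre>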
+     */
+    protected ReducerBuilder(String name, String type) {
+        super(name, type);
+    }
+
+    /**
+     * Sets the paths to the buckets to use for this reducer
+     */
+    public B setBucketsPaths(String... bucketsPaths) {
+        this.bucketsPaths = bucketsPaths;
+        return (B) this;
+    }
+
+    /**
+     * Sets the meta data to be included in the reducer's response
+     */
+    public B setMetaData(Map<String, Object> metaData) {
+        this.metaData = metaData;
+        return (B) this;
+    }
+
+    @Override
+    public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject(getName());
+
+        if (this.metaData != null) {
+            builder.field("meta", this.metaData);
+        }
+        builder.startObject(type);
+
+        if (bucketsPaths != null) {
+            builder.startArray(Reducer.Parser.BUCKETS_PATH.getPreferredName());
+            for (String path : bucketsPaths) {
+                builder.value(path);
+            }
+            builder.endArray();
+        }
+
+        internalXContent(builder, params);
+
+        builder.endObject();
+
+        return builder.endObject();
+    }
+
+    protected abstract XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException;
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java
new file mode 100644
index 00000000000..d2632721c64
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerBuilders.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.reducers;
+
+import org.elasticsearch.search.aggregations.reducers.bucketmetrics.max.MaxBucketBuilder;
+import org.elasticsearch.search.aggregations.reducers.bucketmetrics.min.MinBucketBuilder;
+import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeBuilder;
+import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgBuilder;
+
+public final class ReducerBuilders {
+
+    private ReducerBuilders() {
+    }
+
+    public static final DerivativeBuilder derivative(String name) {
+        return new DerivativeBuilder(name);
+    }
+
+    public static final MaxBucketBuilder maxBucket(String name) {
+        return new MaxBucketBuilder(name);
+    }
+
+    public static final MinBucketBuilder minBucket(String name) {
+        return new MinBucketBuilder(name);
+    }
+
+    public static final MovAvgBuilder movingAvg(String name) {
+        return new MovAvgBuilder(name);
+    }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java
new file mode 100644
index 00000000000..46ac844808c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerFactory.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.reducers;
+
+import org.elasticsearch.search.aggregations.AggregatorFactory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * A factory that knows how to create a {@link Reducer} of a specific type.
+ */
+public abstract class ReducerFactory {
+
+    protected String name;
+    protected String type;
+    protected String[] bucketsPaths;
+    protected Map<String, Object> metaData;
+
+    /**
+     * Constructs a new reducer factory.
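+     * <p>
+     * A sketch of the intended lifecycle, using only methods declared in this
+     * class and in {@link Reducer.Parser} (the surrounding wiring is assumed,
+     * not shown):
+     * <pre>
+     * ReducerFactory factory = parser.parse("max_sales", xContentParser, searchContext);
+     * factory.validate(parentFactory, siblingAggFactories, siblingReducerFactories);
+     * Reducer reducer = factory.create();
+     * </pre>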
+     *
+     * @param name
+     *            The name of the reducer
+     * @param type
+     *            The reducer type
+     */
+    public ReducerFactory(String name, String type, String[] bucketsPaths) {
+        this.name = name;
+        this.type = type;
+        this.bucketsPaths = bucketsPaths;
+    }
+
+    /**
+     * Validates the state of this factory (makes sure the factory is properly
+     * configured)
+     *
+     * @param parent
+     *            The factory of the parent aggregation
+     * @param factories
+     *            The factories of the sibling aggregations
+     * @param reducerFactories
+     *            The factories of the sibling reducers
+     */
+    public final void validate(AggregatorFactory parent, AggregatorFactory[] factories, List<ReducerFactory> reducerFactories) {
+        doValidate(parent, factories, reducerFactories);
+    }
+
+    protected abstract Reducer createInternal(Map<String, Object> metaData) throws IOException;
+
+    /**
+     * Creates the reducer
+     *
+     * @return The created reducer
+     */
+    public final Reducer create() throws IOException {
+        Reducer reducer = createInternal(this.metaData);
+        return reducer;
+    }
+
+    public void doValidate(AggregatorFactory parent, AggregatorFactory[] factories, List<ReducerFactory> reducerFactories) {
+    }
+
+    public void setMetaData(Map<String, Object> metaData) {
+        this.metaData = metaData;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public String[] getBucketsPaths() {
+        return bucketsPaths;
+    }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerStreams.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerStreams.java
new file mode 100644
index 00000000000..7a4319e0a2b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/ReducerStreams.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.search.aggregations.reducers;
+
+import com.google.common.collect.ImmutableMap;
+
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.io.stream.StreamInput;
+
+import java.io.IOException;
+
+/**
+ * A registry for all the dedicated streams in the reducer module. This is to
+ * support dynamically registered reducers that know how to stream themselves.
+ */
+public class ReducerStreams {
+
+    private static ImmutableMap<BytesReference, Stream> streams = ImmutableMap.of();
+
+    /**
+     * A stream that knows how to read a reducer from the input.
+     */
+    public static interface Stream {
+        Reducer readResult(StreamInput in) throws IOException;
+    }
+
+    /**
+     * Registers the given stream and associates it with the given types.
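+     * <p>
+     * Concrete reducers in this change call this from a static
+     * {@code registerStreams()} hook, e.g.:
+     * <pre>
+     * ReducerStreams.registerStream(STREAM, TYPE.stream());
+     * </pre>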
+     *
+     * @param stream The stream to register
+     * @param types The types to associate with the stream
+     */
+    public static synchronized void registerStream(Stream stream, BytesReference... types) {
+        MapBuilder<BytesReference, Stream> uStreams = MapBuilder.newMapBuilder(streams);
+        for (BytesReference type : types) {
+            uStreams.put(type, stream);
+        }
+        streams = uStreams.immutableMap();
+    }
+
+    /**
+     * Returns the stream that is registered for the given type
+     *
+     * @param type The given type
+     * @return The associated stream
+     */
+    public static Stream stream(BytesReference type) {
+        return streams.get(type);
+    }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/SiblingReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/SiblingReducer.java
new file mode 100644
index 00000000000..b0be9634ddc
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/SiblingReducer.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.reducers;
+
+import com.google.common.collect.Lists;
+
+import org.elasticsearch.search.aggregations.Aggregations;
+import org.elasticsearch.search.aggregations.InternalAggregation;
+import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext;
+import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation;
+import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public abstract class SiblingReducer extends Reducer {
+
+    protected SiblingReducer() { // for Serialisation
+        super();
+    }
+
+    protected SiblingReducer(String name, String[] bucketsPaths, Map<String, Object> metaData) {
+        super(name, bucketsPaths, metaData);
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) {
+        @SuppressWarnings("rawtypes")
+        InternalMultiBucketAggregation multiBucketsAgg = (InternalMultiBucketAggregation) aggregation;
+        List<? extends Bucket> buckets = multiBucketsAgg.getBuckets();
+        List<Bucket> newBuckets = new ArrayList<>();
+        for (int i = 0; i < buckets.size(); i++) {
+            InternalMultiBucketAggregation.InternalBucket bucket = (InternalMultiBucketAggregation.InternalBucket) buckets.get(i);
+            InternalAggregation aggToAdd = doReduce(bucket.getAggregations(), reduceContext);
+            List<InternalAggregation> aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), AGGREGATION_TRANFORM_FUNCTION));
+            aggs.add(aggToAdd);
+            InternalMultiBucketAggregation.InternalBucket newBucket = multiBucketsAgg.createBucket(new InternalAggregations(aggs), bucket);
+            newBuckets.add(newBucket);
+        }
+
+        return
multiBucketsAgg.create(newBuckets); + } + + public abstract InternalAggregation doReduce(Aggregations aggregations, ReduceContext context); +} diff --git a/src/main/java/org/elasticsearch/action/deletebyquery/package-info.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/SimpleValue.java similarity index 78% rename from src/main/java/org/elasticsearch/action/deletebyquery/package-info.java rename to src/main/java/org/elasticsearch/search/aggregations/reducers/SimpleValue.java index a4bb68271da..e1c510e1a29 100644 --- a/src/main/java/org/elasticsearch/action/deletebyquery/package-info.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/SimpleValue.java @@ -17,7 +17,10 @@ * under the License. */ -/** - * Delete by query action. - */ -package org.elasticsearch.action.deletebyquery; \ No newline at end of file +package org.elasticsearch.search.aggregations.reducers; + +import org.elasticsearch.search.aggregations.metrics.NumericMetricsAggregation; + +public interface SimpleValue extends NumericMetricsAggregation.SingleValue { + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/InternalBucketMetricValue.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/InternalBucketMetricValue.java new file mode 100644 index 00000000000..2229dd7baf5 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/InternalBucketMetricValue.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.AggregationStreams; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.metrics.InternalNumericMetricsAggregation; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +public class InternalBucketMetricValue extends InternalNumericMetricsAggregation.SingleValue { + + public final static Type TYPE = new Type("bucket_metric_value"); + + public final static AggregationStreams.Stream STREAM = new AggregationStreams.Stream() { + @Override + public InternalBucketMetricValue readResult(StreamInput in) throws IOException { + InternalBucketMetricValue result = new InternalBucketMetricValue(); + result.readFrom(in); + return result; + } + }; + + public static void registerStreams() { + AggregationStreams.registerStream(STREAM, TYPE.stream()); + } + + private double value; + + private String[] keys; + + protected InternalBucketMetricValue() { + super(); + } + + public InternalBucketMetricValue(String name, String[] keys, double value, @Nullable ValueFormatter formatter, + List reducers, Map metaData) { + super(name, reducers, metaData); + this.keys = keys; + this.value = value; + this.valueFormatter = formatter; + } + + @Override + public Type type() { + return TYPE; + } + + @Override + public double value() { + return value; + } + + public String[] keys() { + return keys; + } + + @Override + public InternalAggregation doReduce(List aggregations, ReduceContext reduceContext) { + throw new UnsupportedOperationException("Not supported"); + } + + @Override + public Object getProperty(List path) { + if (path.isEmpty()) { + return this; + } else if (path.size() == 1 && "value".equals(path.get(0))) { + return value(); + } else if (path.size() == 1 && "keys".equals(path.get(0))) { + return keys(); + } else { + throw new IllegalArgumentException("path not supported for [" + getName() + "]: " + path); + } + } + + @Override + protected void doReadFrom(StreamInput in) throws IOException { + valueFormatter = ValueFormatterStreams.readOptional(in); + value = in.readDouble(); + keys = in.readStringArray(); + } + + @Override + protected void doWriteTo(StreamOutput out) throws IOException { + ValueFormatterStreams.writeOptional(valueFormatter, out); + out.writeDouble(value); + out.writeStringArray(keys); + } + + @Override + public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { + boolean hasValue = !Double.isInfinite(value); + builder.field(CommonFields.VALUE, hasValue ? 
value : null); + if (hasValue && valueFormatter != null) { + builder.field(CommonFields.VALUE_AS_STRING, valueFormatter.format(value)); + } + builder.startArray("keys"); + for (String key : keys) { + builder.value(key); + } + builder.endArray(); + return builder; + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketBuilder.java new file mode 100644 index 00000000000..31d588a6497 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketBuilder.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics.max; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; +import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeParser; + +import java.io.IOException; + +public class MaxBucketBuilder extends ReducerBuilder { + + private String format; + private GapPolicy gapPolicy; + + public MaxBucketBuilder(String name) { + super(name, MaxBucketReducer.TYPE.name()); + } + + public MaxBucketBuilder format(String format) { + this.format = format; + return this; + } + + public MaxBucketBuilder gapPolicy(GapPolicy gapPolicy) { + this.gapPolicy = gapPolicy; + return this; + } + + @Override + protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { + if (format != null) { + builder.field(MaxBucketParser.FORMAT.getPreferredName(), format); + } + if (gapPolicy != null) { + builder.field(DerivativeParser.GAP_POLICY.getPreferredName(), gapPolicy.getName()); + } + return builder; + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketParser.java new file mode 100644 index 00000000000..c8f3bad49f1 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketParser.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.reducers.bucketmetrics.max;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
+import org.elasticsearch.search.aggregations.reducers.ReducerFactory;
+import org.elasticsearch.search.aggregations.support.format.ValueFormat;
+import org.elasticsearch.search.aggregations.support.format.ValueFormatter;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class MaxBucketParser implements Reducer.Parser {
+    public static final ParseField FORMAT = new ParseField("format");
+
+    @Override
+    public String type() {
+        return MaxBucketReducer.TYPE.name();
+    }
+
+    @Override
+    public ReducerFactory parse(String reducerName, XContentParser parser, SearchContext context) throws IOException {
+        XContentParser.Token token;
+        String currentFieldName = null;
+        String[] bucketsPaths = null;
+        String format = null;
+        GapPolicy gapPolicy = GapPolicy.SKIP;
+
+        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+            if (token == XContentParser.Token.FIELD_NAME) {
+                currentFieldName = parser.currentName();
+            } else if (token == XContentParser.Token.VALUE_STRING) {
+                if (FORMAT.match(currentFieldName)) {
+                    format = parser.text();
+                } else if (BUCKETS_PATH.match(currentFieldName)) {
+                    bucketsPaths = new String[] { parser.text() };
+                } else if (GAP_POLICY.match(currentFieldName)) {
+                    gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation());
+                } else {
+                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: ["
+                            + currentFieldName + "].", parser.getTokenLocation());
+                }
+            } else if (token == XContentParser.Token.START_ARRAY) {
+                if (BUCKETS_PATH.match(currentFieldName)) {
+                    List<String> paths = new ArrayList<>();
+                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+                        String path = parser.text();
+                        paths.add(path);
+                    }
+                    bucketsPaths = paths.toArray(new String[paths.size()]);
+                } else {
+                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: ["
+                            + currentFieldName + "].", parser.getTokenLocation());
+                }
+            } else {
+                throw new SearchParseException(context, "Unexpected token " + token + " in [" + reducerName + "].",
+                        parser.getTokenLocation());
+            }
+        }
+
+        if (bucketsPaths == null) {
+            throw new SearchParseException(context, "Missing required field [" + BUCKETS_PATH.getPreferredName()
+                    + "] for max_bucket aggregation [" + reducerName + "]", parser.getTokenLocation());
+        }
+
+        ValueFormatter formatter = null;
+        if (format != null) {
+            formatter = ValueFormat.Patternable.Number.format(format).formatter();
+        }
+
+        return new MaxBucketReducer.Factory(reducerName, bucketsPaths, gapPolicy, formatter);
+    }
+
+}
diff --git
a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketReducer.java new file mode 100644 index 00000000000..1d2d5c8d26c --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/max/MaxBucketReducer.java @@ -0,0 +1,151 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics.max; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; +import org.elasticsearch.search.aggregations.InternalAggregation.Type; +import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.reducers.ReducerStreams; +import org.elasticsearch.search.aggregations.reducers.SiblingReducer; +import org.elasticsearch.search.aggregations.support.AggregationPath; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class MaxBucketReducer extends SiblingReducer { + + public final static Type TYPE = new Type("max_bucket"); + + public final static ReducerStreams.Stream STREAM = new ReducerStreams.Stream() { + @Override + public MaxBucketReducer readResult(StreamInput in) throws IOException { + MaxBucketReducer result = new MaxBucketReducer(); + result.readFrom(in); + return result; + } + }; + + private ValueFormatter formatter; + private GapPolicy gapPolicy; + + public static void registerStreams() { + ReducerStreams.registerStream(STREAM, TYPE.stream()); + } + + private MaxBucketReducer() { + } + + protected MaxBucketReducer(String name, String[] 
bucketsPaths, GapPolicy gapPolicy, @Nullable ValueFormatter formatter, + Map metaData) { + super(name, bucketsPaths, metaData); + this.gapPolicy = gapPolicy; + this.formatter = formatter; + } + + @Override + public Type type() { + return TYPE; + } + + public InternalAggregation doReduce(Aggregations aggregations, ReduceContext context) { + List maxBucketKeys = new ArrayList<>(); + double maxValue = Double.NEGATIVE_INFINITY; + List bucketsPath = AggregationPath.parse(bucketsPaths()[0]).getPathElementsAsStringList(); + for (Aggregation aggregation : aggregations) { + if (aggregation.getName().equals(bucketsPath.get(0))) { + bucketsPath = bucketsPath.subList(1, bucketsPath.size()); + InternalMultiBucketAggregation multiBucketsAgg = (InternalMultiBucketAggregation) aggregation; + List buckets = multiBucketsAgg.getBuckets(); + for (int i = 0; i < buckets.size(); i++) { + Bucket bucket = buckets.get(i); + Double bucketValue = BucketHelpers.resolveBucketValue(multiBucketsAgg, bucket, bucketsPath, gapPolicy); + if (bucketValue != null) { + if (bucketValue > maxValue) { + maxBucketKeys.clear(); + maxBucketKeys.add(bucket.getKeyAsString()); + maxValue = bucketValue; + } else if (bucketValue.equals(maxValue)) { + maxBucketKeys.add(bucket.getKeyAsString()); + } + } + } + } + } + String[] keys = maxBucketKeys.toArray(new String[maxBucketKeys.size()]); + return new InternalBucketMetricValue(name(), keys, maxValue, formatter, Collections.EMPTY_LIST, metaData()); + } + + @Override + public void doReadFrom(StreamInput in) throws IOException { + formatter = ValueFormatterStreams.readOptional(in); + gapPolicy = GapPolicy.readFrom(in); + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + ValueFormatterStreams.writeOptional(formatter, out); + gapPolicy.writeTo(out); + } + + public static class Factory extends ReducerFactory { + + private final ValueFormatter formatter; + private final GapPolicy gapPolicy; + + public Factory(String name, String[] bucketsPaths, GapPolicy gapPolicy, @Nullable ValueFormatter formatter) { + super(name, TYPE.name(), bucketsPaths); + this.gapPolicy = gapPolicy; + this.formatter = formatter; + } + + @Override + protected Reducer createInternal(Map metaData) throws IOException { + return new MaxBucketReducer(name, bucketsPaths, gapPolicy, formatter, metaData); + } + + @Override + public void doValidate(AggregatorFactory parent, AggregatorFactory[] aggFactories, List reducerFactories) { + if (bucketsPaths.length != 1) { + throw new IllegalStateException(Reducer.Parser.BUCKETS_PATH.getPreferredName() + + " must contain a single entry for reducer [" + name + "]"); + } + } + + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketBuilder.java new file mode 100644 index 00000000000..b792b7bbac9 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketBuilder.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics.min; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; +import org.elasticsearch.search.aggregations.reducers.derivative.DerivativeParser; + +import java.io.IOException; + +public class MinBucketBuilder extends ReducerBuilder { + + private String format; + private GapPolicy gapPolicy; + + public MinBucketBuilder(String name) { + super(name, MinBucketReducer.TYPE.name()); + } + + public MinBucketBuilder format(String format) { + this.format = format; + return this; + } + + public MinBucketBuilder gapPolicy(GapPolicy gapPolicy) { + this.gapPolicy = gapPolicy; + return this; + } + + @Override + protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { + if (format != null) { + builder.field(MinBucketParser.FORMAT.getPreferredName(), format); + } + if (gapPolicy != null) { + builder.field(DerivativeParser.GAP_POLICY.getPreferredName(), gapPolicy.getName()); + } + return builder; + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketParser.java new file mode 100644 index 00000000000..b956bdb6d79 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketParser.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.search.aggregations.reducers.bucketmetrics.min;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
+import org.elasticsearch.search.aggregations.reducers.ReducerFactory;
+import org.elasticsearch.search.aggregations.support.format.ValueFormat;
+import org.elasticsearch.search.aggregations.support.format.ValueFormatter;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class MinBucketParser implements Reducer.Parser {
+    public static final ParseField FORMAT = new ParseField("format");
+
+    @Override
+    public String type() {
+        return MinBucketReducer.TYPE.name();
+    }
+
+    @Override
+    public ReducerFactory parse(String reducerName, XContentParser parser, SearchContext context) throws IOException {
+        XContentParser.Token token;
+        String currentFieldName = null;
+        String[] bucketsPaths = null;
+        String format = null;
+        GapPolicy gapPolicy = GapPolicy.SKIP;
+
+        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+            if (token == XContentParser.Token.FIELD_NAME) {
+                currentFieldName = parser.currentName();
+            } else if (token == XContentParser.Token.VALUE_STRING) {
+                if (FORMAT.match(currentFieldName)) {
+                    format = parser.text();
+                } else if (BUCKETS_PATH.match(currentFieldName)) {
+                    bucketsPaths = new String[] { parser.text() };
+                } else if (GAP_POLICY.match(currentFieldName)) {
+                    gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation());
+                } else {
+                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: ["
+                            + currentFieldName + "].", parser.getTokenLocation());
+                }
+            } else if (token == XContentParser.Token.START_ARRAY) {
+                if (BUCKETS_PATH.match(currentFieldName)) {
+                    List<String> paths = new ArrayList<>();
+                    while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
+                        String path = parser.text();
+                        paths.add(path);
+                    }
+                    bucketsPaths = paths.toArray(new String[paths.size()]);
+                } else {
+                    throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: ["
+                            + currentFieldName + "].", parser.getTokenLocation());
+                }
+            } else {
+                throw new SearchParseException(context, "Unexpected token " + token + " in [" + reducerName + "].",
+                        parser.getTokenLocation());
+            }
+        }
+
+        if (bucketsPaths == null) {
+            throw new SearchParseException(context, "Missing required field [" + BUCKETS_PATH.getPreferredName()
+                    + "] for min_bucket aggregation [" + reducerName + "]", parser.getTokenLocation());
+        }
+
+        ValueFormatter formatter = null;
+        if (format != null) {
+            formatter = ValueFormat.Patternable.Number.format(format).formatter();
+        }
+
+        return new MinBucketReducer.Factory(reducerName, bucketsPaths, gapPolicy, formatter);
+    }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketReducer.java
new file mode 100644
index 00000000000..7ab257c9fb0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/bucketmetrics/min/MinBucketReducer.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license
agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.bucketmetrics.min; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; +import org.elasticsearch.search.aggregations.InternalAggregation.Type; +import org.elasticsearch.search.aggregations.InternalMultiBucketAggregation; +import org.elasticsearch.search.aggregations.bucket.MultiBucketsAggregation.Bucket; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.reducers.ReducerStreams; +import org.elasticsearch.search.aggregations.reducers.SiblingReducer; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; +import org.elasticsearch.search.aggregations.support.AggregationPath; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +public class MinBucketReducer extends SiblingReducer { + + public final static Type TYPE = new Type("min_bucket"); + + public final static ReducerStreams.Stream STREAM = new ReducerStreams.Stream() { + @Override + public MinBucketReducer readResult(StreamInput in) throws IOException { + MinBucketReducer result = new MinBucketReducer(); + result.readFrom(in); + return result; + } + }; + + private ValueFormatter formatter; + private GapPolicy gapPolicy; + + public static void registerStreams() { + ReducerStreams.registerStream(STREAM, TYPE.stream()); + } + + private MinBucketReducer() { + } + + protected MinBucketReducer(String name, String[] bucketsPaths, GapPolicy gapPolicy, @Nullable ValueFormatter formatter, + Map metaData) { + super(name, bucketsPaths, metaData); + this.gapPolicy = gapPolicy; + this.formatter = formatter; + } + + @Override + public Type type() { + return TYPE; + } + + public InternalAggregation doReduce(Aggregations aggregations, ReduceContext context) { + List minBucketKeys = new ArrayList<>(); + double minValue = Double.POSITIVE_INFINITY; + List bucketsPath = 
AggregationPath.parse(bucketsPaths()[0]).getPathElementsAsStringList(); + for (Aggregation aggregation : aggregations) { + if (aggregation.getName().equals(bucketsPath.get(0))) { + bucketsPath = bucketsPath.subList(1, bucketsPath.size()); + InternalMultiBucketAggregation multiBucketsAgg = (InternalMultiBucketAggregation) aggregation; + List buckets = multiBucketsAgg.getBuckets(); + for (int i = 0; i < buckets.size(); i++) { + Bucket bucket = buckets.get(i); + Double bucketValue = BucketHelpers.resolveBucketValue(multiBucketsAgg, bucket, bucketsPath, gapPolicy); + if (bucketValue != null) { + if (bucketValue < minValue) { + minBucketKeys.clear(); + minBucketKeys.add(bucket.getKeyAsString()); + minValue = bucketValue; + } else if (bucketValue.equals(minValue)) { + minBucketKeys.add(bucket.getKeyAsString()); + } + } + } + } + } + String[] keys = minBucketKeys.toArray(new String[minBucketKeys.size()]); + return new InternalBucketMetricValue(name(), keys, minValue, formatter, Collections.EMPTY_LIST, metaData()); + } + + @Override + public void doReadFrom(StreamInput in) throws IOException { + formatter = ValueFormatterStreams.readOptional(in); + gapPolicy = GapPolicy.readFrom(in); + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + ValueFormatterStreams.writeOptional(formatter, out); + gapPolicy.writeTo(out); + } + + public static class Factory extends ReducerFactory { + + private final ValueFormatter formatter; + private final GapPolicy gapPolicy; + + public Factory(String name, String[] bucketsPaths, GapPolicy gapPolicy, @Nullable ValueFormatter formatter) { + super(name, TYPE.name(), bucketsPaths); + this.gapPolicy = gapPolicy; + this.formatter = formatter; + } + + @Override + protected Reducer createInternal(Map metaData) throws IOException { + return new MinBucketReducer(name, bucketsPaths, gapPolicy, formatter, metaData); + } + + @Override + public void doValidate(AggregatorFactory parent, AggregatorFactory[] aggFactories, List reducerFactories) { + if (bucketsPaths.length != 1) { + throw new IllegalStateException(Reducer.Parser.BUCKETS_PATH.getPreferredName() + + " must contain a single entry for reducer [" + name + "]"); + } + } + + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java new file mode 100644 index 00000000000..210d56d4a6f --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeBuilder.java @@ -0,0 +1,59 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.reducers.derivative; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; + +import java.io.IOException; + +import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; + +public class DerivativeBuilder extends ReducerBuilder { + + private String format; + private GapPolicy gapPolicy; + + public DerivativeBuilder(String name) { + super(name, DerivativeReducer.TYPE.name()); + } + + public DerivativeBuilder format(String format) { + this.format = format; + return this; + } + + public DerivativeBuilder gapPolicy(GapPolicy gapPolicy) { + this.gapPolicy = gapPolicy; + return this; + } + + @Override + protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { + if (format != null) { + builder.field(DerivativeParser.FORMAT.getPreferredName(), format); + } + if (gapPolicy != null) { + builder.field(DerivativeParser.GAP_POLICY.getPreferredName(), gapPolicy.getName()); + } + return builder; + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java new file mode 100644 index 00000000000..a049a285b0e --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeParser.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.reducers.derivative; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.search.SearchParseException; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.support.format.ValueFormat; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.internal.SearchContext; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +public class DerivativeParser implements Reducer.Parser { + + @Override + public String type() { + return DerivativeReducer.TYPE.name(); + } + + @Override + public ReducerFactory parse(String reducerName, XContentParser parser, SearchContext context) throws IOException { + XContentParser.Token token; + String currentFieldName = null; + String[] bucketsPaths = null; + String format = null; + GapPolicy gapPolicy = GapPolicy.SKIP; + + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.VALUE_STRING) { + if (FORMAT.match(currentFieldName)) { + format = parser.text(); + } else if (BUCKETS_PATH.match(currentFieldName)) { + bucketsPaths = new String[] { parser.text() }; + } else if (GAP_POLICY.match(currentFieldName)) { + gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation()); + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (BUCKETS_PATH.match(currentFieldName)) { + List paths = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + String path = parser.text(); + paths.add(path); + } + bucketsPaths = paths.toArray(new String[paths.size()]); + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); + } + } else { + throw new SearchParseException(context, "Unexpected token " + token + " in [" + reducerName + "].", + parser.getTokenLocation()); + } + } + + if (bucketsPaths == null) { + throw new SearchParseException(context, "Missing required field [" + BUCKETS_PATH.getPreferredName() + + "] for derivative aggregation [" + reducerName + "]", parser.getTokenLocation()); + } + + ValueFormatter formatter = null; + if (format != null) { + formatter = ValueFormat.Patternable.Number.format(format).formatter(); + } + + return new DerivativeReducer.Factory(reducerName, bucketsPaths, formatter, gapPolicy); + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java new file mode 100644 index 00000000000..9d3397e8746 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/derivative/DerivativeReducer.java @@ -0,0 +1,157 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.derivative; + +import com.google.common.collect.Lists; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; +import org.elasticsearch.search.aggregations.InternalAggregation.Type; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregator; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.reducers.ReducerStreams; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.resolveBucketValue; + +public class DerivativeReducer extends Reducer { + + public final static Type TYPE = new Type("derivative"); + + public final static ReducerStreams.Stream STREAM = new ReducerStreams.Stream() { + @Override + public DerivativeReducer readResult(StreamInput in) throws IOException { + DerivativeReducer result = new DerivativeReducer(); + result.readFrom(in); + return result; + } + }; + + public static void registerStreams() { + ReducerStreams.registerStream(STREAM, TYPE.stream()); + } + + private ValueFormatter formatter; + private GapPolicy gapPolicy; + + public DerivativeReducer() { + } + + public DerivativeReducer(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, + Map metadata) { + super(name, bucketsPaths, metadata); + this.formatter = formatter; + this.gapPolicy = gapPolicy; + } + + @Override + public Type type() { + return TYPE; + } + + @Override + public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { + InternalHistogram histo = (InternalHistogram) aggregation; + List buckets = histo.getBuckets(); + InternalHistogram.Factory factory = histo.getFactory(); + + List newBuckets = new ArrayList<>(); + Double lastBucketValue = null; + for (InternalHistogram.Bucket bucket : buckets) { + Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy); + if (lastBucketValue != null) { + double diff = 
thisBucketValue - lastBucketValue; + + List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), AGGREGATION_TRANFORM_FUNCTION)); + aggs.add(new InternalSimpleValue(name(), diff, formatter, new ArrayList(), metaData())); + InternalHistogram.Bucket newBucket = factory.createBucket(bucket.getKey(), bucket.getDocCount(), new InternalAggregations( + aggs), bucket.getKeyed(), bucket.getFormatter()); + newBuckets.add(newBucket); + } else { + newBuckets.add(bucket); + } + lastBucketValue = thisBucketValue; + } + return factory.create(newBuckets, histo); + } + + @Override + public void doReadFrom(StreamInput in) throws IOException { + formatter = ValueFormatterStreams.readOptional(in); + gapPolicy = GapPolicy.readFrom(in); + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + ValueFormatterStreams.writeOptional(formatter, out); + gapPolicy.writeTo(out); + } + + public static class Factory extends ReducerFactory { + + private final ValueFormatter formatter; + private GapPolicy gapPolicy; + + public Factory(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy) { + super(name, TYPE.name(), bucketsPaths); + this.formatter = formatter; + this.gapPolicy = gapPolicy; + } + + @Override + protected Reducer createInternal(Map metaData) throws IOException { + return new DerivativeReducer(name, bucketsPaths, formatter, gapPolicy, metaData); + } + + @Override + public void doValidate(AggregatorFactory parent, AggregatorFactory[] aggFactories, List reducerFactories) { + if (bucketsPaths.length != 1) { + throw new IllegalStateException(Reducer.Parser.BUCKETS_PATH.getPreferredName() + + " must contain a single entry for reducer [" + name + "]"); + } + if (!(parent instanceof HistogramAggregator.Factory)) { + throw new IllegalStateException("derivative reducer [" + name + + "] must have a histogram or date_histogram as parent"); + } else { + HistogramAggregator.Factory histoParent = (HistogramAggregator.Factory) parent; + if (histoParent.minDocCount() != 0) { + throw new IllegalStateException("parent histogram of derivative reducer [" + name + + "] must have min_doc_count of 0"); + } + } + } + + } +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgBuilder.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgBuilder.java new file mode 100644 index 00000000000..5fba23957e9 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgBuilder.java @@ -0,0 +1,119 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.reducers.movavg; + +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; +import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelBuilder; + +import java.io.IOException; + +import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; + +/** + * A builder to create MovingAvg reducer aggregations + */ +public class MovAvgBuilder extends ReducerBuilder { + + private String format; + private GapPolicy gapPolicy; + private MovAvgModelBuilder modelBuilder; + private Integer window; + private Integer predict; + + public MovAvgBuilder(String name) { + super(name, MovAvgReducer.TYPE.name()); + } + + public MovAvgBuilder format(String format) { + this.format = format; + return this; + } + + /** + * Defines what should be done when a gap in the series is discovered + * + * @param gapPolicy A GapPolicy enum defining the selected policy + * @return Returns the builder to continue chaining + */ + public MovAvgBuilder gapPolicy(GapPolicy gapPolicy) { + this.gapPolicy = gapPolicy; + return this; + } + + /** + * Sets a MovAvgModelBuilder for the Moving Average. The model builder is used to + * define what type of moving average you want to use on the series + * + * @param modelBuilder A MovAvgModelBuilder which has been prepopulated with settings + * @return Returns the builder to continue chaining + */ + public MovAvgBuilder modelBuilder(MovAvgModelBuilder modelBuilder) { + this.modelBuilder = modelBuilder; + return this; + } + + /** + * Sets the window size for the moving average. This window will "slide" across the + * series, and the values inside that window will be used to calculate the moving avg value + * + * @param window Size of window + * @return Returns the builder to continue chaining + */ + public MovAvgBuilder window(int window) { + this.window = window; + return this; + } + + /** + * Sets the number of predictions that should be returned. Each prediction will be spaced at + * the intervals specified in the histogram. E.g "predict: 2" will return two new buckets at the + * end of the histogram with the predicted values. + * + * @param numPredictions Number of predictions to make + * @return Returns the builder to continue chaining + */ + public MovAvgBuilder predict(int numPredictions) { + this.predict = numPredictions; + return this; + } + + + @Override + protected XContentBuilder internalXContent(XContentBuilder builder, Params params) throws IOException { + if (format != null) { + builder.field(MovAvgParser.FORMAT.getPreferredName(), format); + } + if (gapPolicy != null) { + builder.field(MovAvgParser.GAP_POLICY.getPreferredName(), gapPolicy.getName()); + } + if (modelBuilder != null) { + modelBuilder.toXContent(builder, params); + } + if (window != null) { + builder.field(MovAvgParser.WINDOW.getPreferredName(), window); + } + if (predict != null) { + builder.field(MovAvgParser.PREDICT.getPreferredName(), predict); + } + return builder; + } + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java new file mode 100644 index 00000000000..4ebf6e10d32 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgParser.java @@ -0,0 +1,155 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.reducers.movavg;
+
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.search.SearchParseException;
+import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
+import org.elasticsearch.search.aggregations.reducers.ReducerFactory;
+import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModel;
+import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelParser;
+import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelParserMapper;
+import org.elasticsearch.search.aggregations.support.format.ValueFormat;
+import org.elasticsearch.search.aggregations.support.format.ValueFormatter;
+import org.elasticsearch.search.internal.SearchContext;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public class MovAvgParser implements Reducer.Parser {
+
+    public static final ParseField MODEL = new ParseField("model");
+    public static final ParseField WINDOW = new ParseField("window");
+    public static final ParseField SETTINGS = new ParseField("settings");
+    public static final ParseField PREDICT = new ParseField("predict");
+
+    private final MovAvgModelParserMapper movAvgModelParserMapper;
+
+    @Inject
+    public MovAvgParser(MovAvgModelParserMapper movAvgModelParserMapper) {
+        this.movAvgModelParserMapper = movAvgModelParserMapper;
+    }
+
+    @Override
+    public String type() {
+        return MovAvgReducer.TYPE.name();
+    }
+
+    @Override
+    public ReducerFactory parse(String reducerName, XContentParser parser, SearchContext context) throws IOException {
+        XContentParser.Token token;
+        String currentFieldName = null;
+        String[] bucketsPaths = null;
+        String format = null;
+
+        GapPolicy gapPolicy = GapPolicy.SKIP;
+        int window = 5;
+        Map<String, Object> settings = null;
+        String model = "simple";
+        int predict = 0;
+
+        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+            if (token == XContentParser.Token.FIELD_NAME) {
+                currentFieldName = parser.currentName();
+            } else if (token == XContentParser.Token.VALUE_NUMBER) {
+                if (WINDOW.match(currentFieldName)) {
+                    window = parser.intValue();
+                    if (window <= 0) {
+                        throw new SearchParseException(context, "[" + currentFieldName + "] value must be a positive, "
+                                + "non-zero integer. Value supplied was [" + window + "] in [" + reducerName + "].",
+                                parser.getTokenLocation());
+                    }
+                } else if (PREDICT.match(currentFieldName)) {
+                    predict = parser.intValue();
+                    if (predict <= 0) {
+                        throw new SearchParseException(context, "[" + currentFieldName + "] value must be a positive, " + "non-zero integer. 
Value supplied was [" + predict + "] in [" + reducerName + "].", + parser.getTokenLocation()); + } + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.VALUE_STRING) { + if (FORMAT.match(currentFieldName)) { + format = parser.text(); + } else if (BUCKETS_PATH.match(currentFieldName)) { + bucketsPaths = new String[] { parser.text() }; + } else if (GAP_POLICY.match(currentFieldName)) { + gapPolicy = GapPolicy.parse(context, parser.text(), parser.getTokenLocation()); + } else if (MODEL.match(currentFieldName)) { + model = parser.text(); + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.START_ARRAY) { + if (BUCKETS_PATH.match(currentFieldName)) { + List paths = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + String path = parser.text(); + paths.add(path); + } + bucketsPaths = paths.toArray(new String[paths.size()]); + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); + } + } else if (token == XContentParser.Token.START_OBJECT) { + if (SETTINGS.match(currentFieldName)) { + settings = parser.map(); + } else { + throw new SearchParseException(context, "Unknown key for a " + token + " in [" + reducerName + "]: [" + + currentFieldName + "].", parser.getTokenLocation()); + } + } else { + throw new SearchParseException(context, "Unexpected token " + token + " in [" + reducerName + "].", + parser.getTokenLocation()); + } + } + + if (bucketsPaths == null) { + throw new SearchParseException(context, "Missing required field [" + BUCKETS_PATH.getPreferredName() + + "] for movingAvg aggregation [" + reducerName + "]", parser.getTokenLocation()); + } + + ValueFormatter formatter = null; + if (format != null) { + formatter = ValueFormat.Patternable.Number.format(format).formatter(); + } + + MovAvgModelParser modelParser = movAvgModelParserMapper.get(model); + if (modelParser == null) { + throw new SearchParseException(context, "Unknown model [" + model + "] specified. Valid options are:" + + movAvgModelParserMapper.getAllNames().toString(), parser.getTokenLocation()); + } + MovAvgModel movAvgModel = modelParser.parse(settings); + + + return new MovAvgReducer.Factory(reducerName, bucketsPaths, formatter, gapPolicy, window, predict, movAvgModel); + } + + +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java new file mode 100644 index 00000000000..d9ff1b4173e --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/MovAvgReducer.java @@ -0,0 +1,233 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.movavg; + +import com.google.common.base.Function; +import com.google.common.collect.EvictingQueue; +import com.google.common.collect.Lists; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.AggregationExecutionException; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; +import org.elasticsearch.search.aggregations.InternalAggregation.Type; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregator; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.InternalSimpleValue; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.ReducerFactory; +import org.elasticsearch.search.aggregations.reducers.ReducerStreams; +import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModel; +import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelStreams; +import org.elasticsearch.search.aggregations.support.format.ValueFormatter; +import org.elasticsearch.search.aggregations.support.format.ValueFormatterStreams; +import org.joda.time.DateTime; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.search.aggregations.reducers.BucketHelpers.resolveBucketValue; + +public class MovAvgReducer extends Reducer { + + public final static Type TYPE = new Type("moving_avg"); + + public final static ReducerStreams.Stream STREAM = new ReducerStreams.Stream() { + @Override + public MovAvgReducer readResult(StreamInput in) throws IOException { + MovAvgReducer result = new MovAvgReducer(); + result.readFrom(in); + return result; + } + }; + + public static void registerStreams() { + ReducerStreams.registerStream(STREAM, TYPE.stream()); + } + + private static final Function FUNCTION = new Function() { + @Override + public InternalAggregation apply(Aggregation input) { + return (InternalAggregation) input; + } + }; + + private ValueFormatter formatter; + private GapPolicy gapPolicy; + private int window; + private MovAvgModel model; + private int predict; + + public MovAvgReducer() { + } + + public MovAvgReducer(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, + int window, int predict, MovAvgModel model, Map metadata) { + super(name, bucketsPaths, metadata); + this.formatter = formatter; + this.gapPolicy = gapPolicy; + this.window = window; + this.model = model; + this.predict = predict; 
+ } + + @Override + public Type type() { + return TYPE; + } + + @Override + public InternalAggregation reduce(InternalAggregation aggregation, ReduceContext reduceContext) { + InternalHistogram histo = (InternalHistogram) aggregation; + List buckets = histo.getBuckets(); + InternalHistogram.Factory factory = histo.getFactory(); + + List newBuckets = new ArrayList<>(); + EvictingQueue values = EvictingQueue.create(this.window); + + long lastKey = 0; + long interval = Long.MAX_VALUE; + Object currentKey; + + for (InternalHistogram.Bucket bucket : buckets) { + Double thisBucketValue = resolveBucketValue(histo, bucket, bucketsPaths()[0], gapPolicy); + currentKey = bucket.getKey(); + + if (!(thisBucketValue == null || thisBucketValue.equals(Double.NaN))) { + values.offer(thisBucketValue); + + double movavg = model.next(values); + + List aggs = new ArrayList<>(Lists.transform(bucket.getAggregations().asList(), FUNCTION)); + aggs.add(new InternalSimpleValue(name(), movavg, formatter, new ArrayList(), metaData())); + InternalHistogram.Bucket newBucket = factory.createBucket(currentKey, bucket.getDocCount(), new InternalAggregations( + aggs), bucket.getKeyed(), bucket.getFormatter()); + newBuckets.add(newBucket); + + } else { + newBuckets.add(bucket); + } + + if (predict > 0) { + if (currentKey instanceof Number) { + interval = Math.min(interval, ((Number) bucket.getKey()).longValue() - lastKey); + lastKey = ((Number) bucket.getKey()).longValue(); + } else if (currentKey instanceof DateTime) { + interval = Math.min(interval, ((DateTime) bucket.getKey()).getMillis() - lastKey); + lastKey = ((DateTime) bucket.getKey()).getMillis(); + } else { + throw new AggregationExecutionException("Expected key of type Number or DateTime but got [" + currentKey + "]"); + } + } + + } + + + if (buckets.size() > 0 && predict > 0) { + + boolean keyed; + ValueFormatter formatter; + keyed = buckets.get(0).getKeyed(); + formatter = buckets.get(0).getFormatter(); + + double[] predictions = model.predict(values, predict); + for (int i = 0; i < predictions.length; i++) { + List aggs = new ArrayList<>(); + aggs.add(new InternalSimpleValue(name(), predictions[i], formatter, new ArrayList(), metaData())); + InternalHistogram.Bucket newBucket = factory.createBucket(lastKey + (interval * (i + 1)), 0, new InternalAggregations( + aggs), keyed, formatter); + newBuckets.add(newBucket); + } + } + + return factory.create(newBuckets, histo); + } + + @Override + public void doReadFrom(StreamInput in) throws IOException { + formatter = ValueFormatterStreams.readOptional(in); + gapPolicy = GapPolicy.readFrom(in); + window = in.readVInt(); + predict = in.readVInt(); + model = MovAvgModelStreams.read(in); + + } + + @Override + public void doWriteTo(StreamOutput out) throws IOException { + ValueFormatterStreams.writeOptional(formatter, out); + gapPolicy.writeTo(out); + out.writeVInt(window); + out.writeVInt(predict); + model.writeTo(out); + + } + + public static class Factory extends ReducerFactory { + + private final ValueFormatter formatter; + private GapPolicy gapPolicy; + private int window; + private MovAvgModel model; + private int predict; + + public Factory(String name, String[] bucketsPaths, @Nullable ValueFormatter formatter, GapPolicy gapPolicy, + int window, int predict, MovAvgModel model) { + super(name, TYPE.name(), bucketsPaths); + this.formatter = formatter; + this.gapPolicy = gapPolicy; + this.window = window; + this.model = model; + this.predict = predict; + } + + @Override + protected Reducer createInternal(Map metaData) 
            throws IOException {
+        return new MovAvgReducer(name, bucketsPaths, formatter, gapPolicy, window, predict, model, metaData);
+    }
+
+        @Override
+        public void doValidate(AggregatorFactory parent, AggregatorFactory[] aggFactories, List<ReducerFactory> reducerFactories) {
+            if (bucketsPaths.length != 1) {
+                throw new IllegalStateException(Reducer.Parser.BUCKETS_PATH.getPreferredName()
+                        + " must contain a single entry for reducer [" + name + "]");
+            }
+            if (!(parent instanceof HistogramAggregator.Factory)) {
+                throw new IllegalStateException("moving average reducer [" + name
+                        + "] must have a histogram or date_histogram as parent");
+            } else {
+                HistogramAggregator.Factory histoParent = (HistogramAggregator.Factory) parent;
+                if (histoParent.minDocCount() != 0) {
+                    throw new IllegalStateException("parent histogram of moving average reducer [" + name
+                            + "] must have min_doc_count of 0");
+                }
+            }
+        }
+
+    }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/DoubleExpModel.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/DoubleExpModel.java
new file mode 100644
index 00000000000..7d32989cda1
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/DoubleExpModel.java
@@ -0,0 +1,214 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.reducers.movavg.models;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgParser;
+
+import java.io.IOException;
+import java.util.*;
+
+/**
+ * Calculate a doubly exponential weighted moving average
+ */
+public class DoubleExpModel extends MovAvgModel {
+
+    protected static final ParseField NAME_FIELD = new ParseField("double_exp");
+
+    /**
+     * Controls smoothing of data. Alpha = 1 retains no memory of past values
+     * (e.g. random walk), while alpha = 0 retains infinite memory of past values (e.g.
+     * mean of the series). Useful values are somewhere in between.
+     */
+    private double alpha;
+
+    /**
+     * Equivalent to alpha, but controls the smoothing of the trend instead of the data
+     */
+    private double beta;
+
+    public DoubleExpModel(double alpha, double beta) {
+        this.alpha = alpha;
+        this.beta = beta;
+    }
+
+    /**
+     * Predicts the next `n` values in the series, using the smoothing model to generate new values.
+     * Unlike the other moving averages, double-exp has forecasting/prediction built into the algorithm.
+     * Prediction is more involved than simply adding the next prediction to the window and repeating.
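+     * (In the implementation below, the i-th forecast, counting from zero, is
+     * s + i * b, where s and b are the smoothed value and trend after the last
+     * observation in the window.)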
     * Double-exp will extrapolate into the future by applying the trend information to the smoothed data.
+     *
+     * @param values Collection of numerics to movingAvg, usually windowed
+     * @param numPredictions Number of newly generated predictions to return
+     * @param <T> Type of numeric
+     * @return Returns an array of doubles, since most smoothing methods operate on floating points
+     */
+    @Override
+    public <T extends Number> double[] predict(Collection<T> values, int numPredictions) {
+        return next(values, numPredictions);
+    }
+
+    @Override
+    public <T extends Number> double next(Collection<T> values) {
+        return next(values, 1)[0];
+    }
+
+    /**
+     * Calculate a doubly exponential weighted moving average
+     *
+     * @param values Collection of values to calculate avg for
+     * @param numForecasts number of forecasts into the future to return
+     *
+     * @param <T> Type T extending Number
+     * @return Returns an array of doubles containing the smoothed value followed by the requested forecasts
+     */
+    public <T extends Number> double[] next(Collection<T> values, int numForecasts) {
+
+        if (values.size() == 0) {
+            return emptyPredictions(numForecasts);
+        }
+
+        // Smoothed value
+        double s = 0;
+        double last_s = 0;
+
+        // Trend value
+        double b = 0;
+        double last_b = 0;
+
+        int counter = 0;
+
+        //TODO bail if too few values
+
+        for (T v : values) {
+            if (counter == 0) {
+                // the first observation seeds the smoothed value; the trend
+                // stays at zero until later observations update it below
+                s = v.doubleValue();
+            } else {
+                s = alpha * v.doubleValue() + (1.0d - alpha) * (last_s + last_b);
+                b = beta * (s - last_s) + (1 - beta) * last_b;
+            }
+
+            counter += 1;
+            last_s = s;
+            last_b = b;
+        }
+
+        double[] forecastValues = new double[numForecasts];
+        for (int i = 0; i < numForecasts; i++) {
+            forecastValues[i] = s + (i * b);
+        }
+
+        return forecastValues;
+    }
+
+    public static final MovAvgModelStreams.Stream STREAM = new MovAvgModelStreams.Stream() {
+        @Override
+        public MovAvgModel readResult(StreamInput in) throws IOException {
+            return new DoubleExpModel(in.readDouble(), in.readDouble());
+        }
+
+        @Override
+        public String getName() {
+            return NAME_FIELD.getPreferredName();
+        }
+    };
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(STREAM.getName());
+        out.writeDouble(alpha);
+        out.writeDouble(beta);
+    }
+
+    public static class DoubleExpModelParser implements MovAvgModelParser {
+
+        @Override
+        public String getName() {
+            return NAME_FIELD.getPreferredName();
+        }
+
+        @Override
+        public MovAvgModel parse(@Nullable Map<String, Object> settings) {
+
+            Double alpha;
+            Double beta;
+
+            if (settings == null || (alpha = (Double) settings.get("alpha")) == null) {
+                alpha = 0.5;
+            }
+
+            if (settings == null || (beta = (Double) settings.get("beta")) == null) {
+                beta = 0.5;
+            }
+
+            return new DoubleExpModel(alpha, beta);
+        }
+    }
+
+    public static class DoubleExpModelBuilder implements MovAvgModelBuilder {
+
+        private double alpha = 0.5;
+        private double beta = 0.5;
+
+        /**
+         * Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values
+         * (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g.
+         * the series mean). Useful values are somewhere in between. Defaults to 0.5.
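+         * <p>
+         * A minimal usage sketch wiring this model into a moving average via the
+         * builder API (the aggregation name and settings are illustrative, not
+         * defaults; buckets path setup is omitted):
+         * <pre>{@code
+         * new MovAvgBuilder("trend")
+         *     .window(10)
+         *     .modelBuilder(new DoubleExpModel.DoubleExpModelBuilder()
+         *         .alpha(0.3)
+         *         .beta(0.1));
+         * }</pre>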
+ * + * @param alpha A double between 0-1 inclusive, controls data smoothing + * + * @return The builder to continue chaining + */ + public DoubleExpModelBuilder alpha(double alpha) { + this.alpha = alpha; + return this; + } + + /** + * Equivalent to alpha, but controls the smoothing of the trend instead of the data + * + * @param beta a double between 0-1 inclusive, controls trend smoothing + * + * @return The builder to continue chaining + */ + public DoubleExpModelBuilder beta(double beta) { + this.beta = beta; + return this; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(MovAvgParser.MODEL.getPreferredName(), NAME_FIELD.getPreferredName()); + builder.startObject(MovAvgParser.SETTINGS.getPreferredName()); + builder.field("alpha", alpha); + builder.field("beta", beta); + builder.endObject(); + return builder; + } + } +} + diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/LinearModel.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/LinearModel.java new file mode 100644 index 00000000000..6c269590d33 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/LinearModel.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.movavg.models; + + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgParser; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; + +/** + * Calculate a linearly weighted moving average, such that older values are + * linearly less important. 
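+ * (Concretely, for a window [v1, ..., vn] the weight of the i-th value is i,
+ * so the result is (1*v1 + 2*v2 + ... + n*vn) / (1 + 2 + ... + n).)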
"Time" is determined by position in collection + */ +public class LinearModel extends MovAvgModel { + + protected static final ParseField NAME_FIELD = new ParseField("linear"); + + @Override + public double next(Collection values) { + double avg = 0; + long totalWeight = 1; + long current = 1; + + for (T v : values) { + avg += v.doubleValue() * current; + totalWeight += current; + current += 1; + } + return avg / totalWeight; + } + + public static final MovAvgModelStreams.Stream STREAM = new MovAvgModelStreams.Stream() { + @Override + public MovAvgModel readResult(StreamInput in) throws IOException { + return new LinearModel(); + } + + @Override + public String getName() { + return NAME_FIELD.getPreferredName(); + } + }; + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(STREAM.getName()); + } + + public static class LinearModelParser implements MovAvgModelParser { + + @Override + public String getName() { + return NAME_FIELD.getPreferredName(); + } + + @Override + public MovAvgModel parse(@Nullable Map settings) { + return new LinearModel(); + } + } + + public static class LinearModelBuilder implements MovAvgModelBuilder { + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(MovAvgParser.MODEL.getPreferredName(), NAME_FIELD.getPreferredName()); + return builder; + } + } +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModel.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModel.java new file mode 100644 index 00000000000..8b8e80cff2f --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModel.java @@ -0,0 +1,97 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers.movavg.models; + +import com.google.common.collect.EvictingQueue; + +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; + +public abstract class MovAvgModel { + + /** + * Returns the next value in the series, according to the underlying smoothing model + * + * @param values Collection of numerics to movingAvg, usually windowed + * @param Type of numeric + * @return Returns a double, since most smoothing methods operate on floating points + */ + public abstract double next(Collection values); + + /** + * Predicts the next `n` values in the series, using the smoothing model to generate new values. + * Default prediction mode is to simply continuing calling next() and adding the + * predicted value back into the windowed buffer. 
+     *
+     * @param values Collection of numerics to movingAvg, usually windowed
+     * @param numPredictions Number of newly generated predictions to return
+     * @param <T> Type of numeric
+     * @return Returns an array of doubles, since most smoothing methods operate on floating points
+     */
+    public <T extends Number> double[] predict(Collection<T> values, int numPredictions) {
+        if (numPredictions < 1) {
+            throw new IllegalArgumentException("numPredictions may not be less than 1.");
+        }
+
+        double[] predictions = new double[numPredictions];
+
+        // If there are no values, we can't do anything. Return an array of NaNs.
+        if (values.size() == 0) {
+            return emptyPredictions(numPredictions);
+        }
+
+        // special case for one prediction, avoids allocating the prediction buffer
+        if (numPredictions == 1) {
+            predictions[0] = next(values);
+            return predictions;
+        }
+
+        Collection<Number> predictionBuffer = EvictingQueue.create(values.size());
+        predictionBuffer.addAll(values);
+
+        for (int i = 0; i < numPredictions; i++) {
+            predictions[i] = next(predictionBuffer);
+
+            // Add the last value to the buffer, so we can keep predicting
+            predictionBuffer.add(predictions[i]);
+        }
+
+        return predictions;
+    }
+
+    protected double[] emptyPredictions(int numPredictions) {
+        double[] predictions = new double[numPredictions];
+        Arrays.fill(predictions, Double.NaN);
+        return predictions;
+    }
+
+    /**
+     * Write the model to the output stream
+     *
+     * @param out Output stream
+     * @throws IOException on failure to write to the stream
+     */
+    public abstract void writeTo(StreamOutput out) throws IOException;
+}
+
+
+
diff --git a/src/main/java/org/elasticsearch/index/store/fs/DefaultFsIndexStoreModule.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelBuilder.java
similarity index 66%
rename from src/main/java/org/elasticsearch/index/store/fs/DefaultFsIndexStoreModule.java
rename to src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelBuilder.java
index 895ba668a0e..a8f40d474ac 100644
--- a/src/main/java/org/elasticsearch/index/store/fs/DefaultFsIndexStoreModule.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelBuilder.java
@@ -17,18 +17,16 @@
  * under the License.
  */
-package org.elasticsearch.index.store.fs;
+package org.elasticsearch.search.aggregations.reducers.movavg.models;
-import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.index.store.IndexStore;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+
+import java.io.IOException;
 /**
- *
+ * Represents the common interface that all moving average model builders share.
+ * Moving average model builders are used by the MovAvg reducer
  */
-public class DefaultFsIndexStoreModule extends AbstractModule {
-
-    @Override
-    protected void configure() {
-        bind(IndexStore.class).to(DefaultFsIndexStore.class).asEagerSingleton();
-    }
-}
\ No newline at end of file
+public interface MovAvgModelBuilder extends ToXContent {
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelModule.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelModule.java
new file mode 100644
index 00000000000..71ccbcb31b0
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelModule.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements.
+ * See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+package org.elasticsearch.search.aggregations.reducers.movavg.models;
+
+import com.google.common.collect.Lists;
+import org.elasticsearch.common.inject.AbstractModule;
+import org.elasticsearch.common.inject.multibindings.Multibinder;
+
+import java.util.List;
+
+/**
+ * Register the various model parsers
+ */
+public class MovAvgModelModule extends AbstractModule {
+
+    private List<Class<? extends MovAvgModelParser>> parsers = Lists.newArrayList();
+
+    public MovAvgModelModule() {
+        registerParser(SimpleModel.SimpleModelParser.class);
+        registerParser(LinearModel.LinearModelParser.class);
+        registerParser(SingleExpModel.SingleExpModelParser.class);
+        registerParser(DoubleExpModel.DoubleExpModelParser.class);
+    }
+
+    public void registerParser(Class<? extends MovAvgModelParser> parser) {
+        parsers.add(parser);
+    }
+
+    @Override
+    protected void configure() {
+        Multibinder<MovAvgModelParser> parserMapBinder = Multibinder.newSetBinder(binder(), MovAvgModelParser.class);
+        for (Class<? extends MovAvgModelParser> clazz : parsers) {
+            parserMapBinder.addBinding().to(clazz);
+        }
+        bind(MovAvgModelParserMapper.class);
+    }
+}
diff --git a/src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStoreModule.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelParser.java
similarity index 69%
rename from src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStoreModule.java
rename to src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelParser.java
index 7f655908355..d27e447baa4 100644
--- a/src/main/java/org/elasticsearch/index/store/fs/MmapFsIndexStoreModule.java
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelParser.java
@@ -17,18 +17,18 @@
  * under the License.
 */
-package org.elasticsearch.index.store.fs;
+package org.elasticsearch.search.aggregations.reducers.movavg.models;
-import org.elasticsearch.common.inject.AbstractModule;
-import org.elasticsearch.index.store.IndexStore;
+
+import org.elasticsearch.common.Nullable;
+
+import java.util.Map;
 /**
- *
+ * Common interface for parsers used by the various Moving Average models
  */
-public class MmapFsIndexStoreModule extends AbstractModule {
+public interface MovAvgModelParser {
+    public MovAvgModel parse(@Nullable Map<String, Object> settings);
-    @Override
-    protected void configure() {
-        bind(IndexStore.class).to(MmapFsIndexStore.class).asEagerSingleton();
-    }
-}
\ No newline at end of file
+    public String getName();
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelParserMapper.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelParserMapper.java
new file mode 100644
index 00000000000..459729d8960
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelParserMapper.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.reducers.movavg.models;
+
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.inject.Inject;
+
+import java.util.Set;
+
+/**
+ * Contains a map of all concrete model parsers which can be used to build Models
+ */
+public class MovAvgModelParserMapper {
+
+    protected ImmutableMap<String, MovAvgModelParser> movAvgParsers;
+
+    @Inject
+    public MovAvgModelParserMapper(Set<MovAvgModelParser> parsers) {
+        MapBuilder<String, MovAvgModelParser> builder = MapBuilder.newMapBuilder();
+        for (MovAvgModelParser parser : parsers) {
+            builder.put(parser.getName(), parser);
+        }
+        movAvgParsers = builder.immutableMap();
+    }
+
+    public @Nullable MovAvgModelParser get(String parserName) {
+        return movAvgParsers.get(parserName);
+    }
+
+    public ImmutableSet<String> getAllNames() {
+        return movAvgParsers.keySet();
+    }
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelStreams.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelStreams.java
new file mode 100644
index 00000000000..b11a3687021
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/MovAvgModelStreams.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership.
Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.reducers.movavg.models;
+
+import com.google.common.collect.ImmutableMap;
+import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.io.stream.StreamInput;
+
+import java.io.IOException;
+
+/**
+ * A registry for all moving average models. This is needed for reading them from a stream without knowing which
+ * one it is.
+ */
+public class MovAvgModelStreams {
+
+    private static ImmutableMap<String, Stream> STREAMS = ImmutableMap.of();
+
+    public static MovAvgModel read(StreamInput in) throws IOException {
+        return stream(in.readString()).readResult(in);
+    }
+
+    /**
+     * A stream that knows how to read a model from the input.
+     */
+    public static interface Stream {
+
+        MovAvgModel readResult(StreamInput in) throws IOException;
+
+        String getName();
+    }
+
+    /**
+     * Registers the given stream and associates it with the given names.
+     *
+     * @param stream The stream to register
+     * @param names The names associated with the streams
+     */
+    public static synchronized void registerStream(Stream stream, String... names) {
+        MapBuilder<String, Stream> uStreams = MapBuilder.newMapBuilder(STREAMS);
+        for (String name : names) {
+            uStreams.put(name, stream);
+        }
+        STREAMS = uStreams.immutableMap();
+    }
+
+    /**
+     * Returns the stream that is registered for the given name
+     *
+     * @param name The given name
+     * @return The associated stream
+     */
+    public static Stream stream(String name) {
+        return STREAMS.get(name);
+    }
+
+}
diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/SimpleModel.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/SimpleModel.java
new file mode 100644
index 00000000000..243b022af2c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/SimpleModel.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.search.aggregations.reducers.movavg.models; + +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgParser; + +import java.io.IOException; +import java.util.Collection; +import java.util.Map; + +/** + * Calculate a simple unweighted (arithmetic) moving average + */ +public class SimpleModel extends MovAvgModel { + + protected static final ParseField NAME_FIELD = new ParseField("simple"); + + @Override + public double next(Collection values) { + double avg = 0; + for (T v : values) { + avg += v.doubleValue(); + } + return avg / values.size(); + } + + public static final MovAvgModelStreams.Stream STREAM = new MovAvgModelStreams.Stream() { + @Override + public MovAvgModel readResult(StreamInput in) throws IOException { + return new SimpleModel(); + } + + @Override + public String getName() { + return NAME_FIELD.getPreferredName(); + } + }; + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(STREAM.getName()); + } + + public static class SimpleModelParser implements MovAvgModelParser { + + @Override + public String getName() { + return NAME_FIELD.getPreferredName(); + } + + @Override + public MovAvgModel parse(@Nullable Map settings) { + return new SimpleModel(); + } + } + + public static class SimpleModelBuilder implements MovAvgModelBuilder { + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(MovAvgParser.MODEL.getPreferredName(), NAME_FIELD.getPreferredName()); + return builder; + } + } +} diff --git a/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/SingleExpModel.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/SingleExpModel.java new file mode 100644 index 00000000000..f17ba68f498 --- /dev/null +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/SingleExpModel.java @@ -0,0 +1,133 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.search.aggregations.reducers.movavg.models;
+
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.search.aggregations.reducers.movavg.MovAvgParser;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+
+/**
+ * Calculate an exponentially weighted moving average
+ */
+public class SingleExpModel extends MovAvgModel {
+
+    protected static final ParseField NAME_FIELD = new ParseField("single_exp");
+
+    /**
+     * Controls smoothing of data. Alpha = 1 retains no memory of past values
+     * (e.g. random walk), while alpha = 0 retains infinite memory of past values (e.g.
+     * mean of the series). Useful values are somewhere in between.
+     */
+    private double alpha;
+
+    public SingleExpModel(double alpha) {
+        this.alpha = alpha;
+    }
+
+    @Override
+    public <T extends Number> double next(Collection<T> values) {
+        double avg = 0;
+        boolean first = true;
+
+        for (T v : values) {
+            if (first) {
+                avg = v.doubleValue();
+                first = false;
+            } else {
+                avg = (v.doubleValue() * alpha) + (avg * (1 - alpha));
+            }
+        }
+        return avg;
+    }
+
+    public static final MovAvgModelStreams.Stream STREAM = new MovAvgModelStreams.Stream() {
+        @Override
+        public MovAvgModel readResult(StreamInput in) throws IOException {
+            return new SingleExpModel(in.readDouble());
+        }
+
+        @Override
+        public String getName() {
+            return NAME_FIELD.getPreferredName();
+        }
+    };
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(STREAM.getName());
+        out.writeDouble(alpha);
+    }
+
+    public static class SingleExpModelParser implements MovAvgModelParser {
+
+        @Override
+        public String getName() {
+            return NAME_FIELD.getPreferredName();
+        }
+
+        @Override
+        public MovAvgModel parse(@Nullable Map<String, Object> settings) {
+
+            Double alpha;
+            if (settings == null || (alpha = (Double) settings.get("alpha")) == null) {
+                alpha = 0.5;
+            }
+
+            return new SingleExpModel(alpha);
+        }
+    }
+
+    public static class SingleExpModelBuilder implements MovAvgModelBuilder {
+
+        private double alpha = 0.5;
+
+        /**
+         * Alpha controls the smoothing of the data. Alpha = 1 retains no memory of past values
+         * (e.g. a random walk), while alpha = 0 retains infinite memory of past values (e.g.
+         * the series mean). Useful values are somewhere in between. Defaults to 0.5.
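+         * <p>
+         * As a worked example with alpha = 0.5 over the values [1, 3, 2]: the
+         * first value seeds the average at 1.0, the second step gives
+         * 0.5*3 + 0.5*1.0 = 2.0, and the third gives 0.5*2 + 0.5*2.0 = 2.0.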
+ * + * @param alpha A double between 0-1 inclusive, controls data smoothing + * + * @return The builder to continue chaining + */ + public SingleExpModelBuilder alpha(double alpha) { + this.alpha = alpha; + return this; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(MovAvgParser.MODEL.getPreferredName(), NAME_FIELD.getPreferredName()); + builder.startObject(MovAvgParser.SETTINGS.getPreferredName()); + builder.field("alpha", alpha); + builder.endObject(); + return builder; + } + } +} + diff --git a/src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCacheModule.java b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/TransportMovAvgModelModule.java similarity index 52% rename from src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCacheModule.java rename to src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/TransportMovAvgModelModule.java index b244d5ff9b6..bc085f6241a 100644 --- a/src/main/java/org/elasticsearch/index/cache/query/parser/QueryParserCacheModule.java +++ b/src/main/java/org/elasticsearch/search/aggregations/reducers/movavg/models/TransportMovAvgModelModule.java @@ -17,28 +17,35 @@ * under the License. */ -package org.elasticsearch.index.cache.query.parser; +package org.elasticsearch.search.aggregations.reducers.movavg.models; +import com.google.common.collect.Lists; import org.elasticsearch.common.inject.AbstractModule; -import org.elasticsearch.common.inject.Scopes; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.cache.query.parser.resident.ResidentQueryParserCache; + +import java.util.List; /** - * + * Register the transport streams so that models can be serialized/deserialized from the stream */ -public class QueryParserCacheModule extends AbstractModule { +public class TransportMovAvgModelModule extends AbstractModule { - private final Settings settings; + private List streams = Lists.newArrayList(); - public QueryParserCacheModule(Settings settings) { - this.settings = settings; + public TransportMovAvgModelModule() { + registerStream(SimpleModel.STREAM); + registerStream(LinearModel.STREAM); + registerStream(SingleExpModel.STREAM); + registerStream(DoubleExpModel.STREAM); + } + + public void registerStream(MovAvgModelStreams.Stream stream) { + streams.add(stream); } @Override protected void configure() { - bind(QueryParserCache.class) - .to(settings.getAsClass("index.cache.query.parser.type", ResidentQueryParserCache.class, "org.elasticsearch.index.cache.query.parser.", "QueryParserCache")) - .in(Scopes.SINGLETON); + for (MovAvgModelStreams.Stream stream : streams) { + MovAvgModelStreams.registerStream(stream, stream.getName()); + } } } diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java b/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java index 6dae0975d91..14d6c5e4a7b 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java +++ b/src/main/java/org/elasticsearch/search/aggregations/support/AggregationPath.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.support; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Strings; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationExecutionException; @@ -158,7 +157,7 @@ public class 
AggregationPath { public AggregationPath(List tokens) { this.pathElements = tokens; if (tokens == null || tokens.size() == 0) { - throw new ElasticsearchIllegalArgumentException("Invalid path [" + this + "]"); + throw new IllegalArgumentException("Invalid path [" + this + "]"); } } @@ -206,13 +205,13 @@ public class AggregationPath { Aggregation agg = parent.getAggregations().get(token.name); if (agg == null) { - throw new ElasticsearchIllegalArgumentException("Invalid order path [" + this + + throw new IllegalArgumentException("Invalid order path [" + this + "]. Cannot find aggregation named [" + token.name + "]"); } if (agg instanceof SingleBucketAggregation) { if (token.key != null && !token.key.equals("doc_count")) { - throw new ElasticsearchIllegalArgumentException("Invalid order path [" + this + + throw new IllegalArgumentException("Invalid order path [" + this + "]. Unknown value key [" + token.key + "] for single-bucket aggregation [" + token.name + "]. Either use [doc_count] as key or drop the key all together"); } @@ -223,13 +222,13 @@ public class AggregationPath { // the agg can only be a metrics agg, and a metrics agg must be at the end of the path if (i != pathElements.size() - 1) { - throw new ElasticsearchIllegalArgumentException("Invalid order path [" + this + + throw new IllegalArgumentException("Invalid order path [" + this + "]. Metrics aggregations cannot have sub-aggregations (at [" + token + ">" + pathElements.get(i + 1) + "]"); } if (agg instanceof InternalNumericMetricsAggregation.SingleValue) { if (token.key != null && !token.key.equals("value")) { - throw new ElasticsearchIllegalArgumentException("Invalid order path [" + this + + throw new IllegalArgumentException("Invalid order path [" + this + "]. Unknown value key [" + token.key + "] for single-value metric aggregation [" + token.name + "]. Either use [value] as key or drop the key all together"); } @@ -240,7 +239,7 @@ public class AggregationPath { // we're left with a multi-value metric agg if (token.key == null) { - throw new ElasticsearchIllegalArgumentException("Invalid order path [" + this + + throw new IllegalArgumentException("Invalid order path [" + this + "]. Missing value key in [" + token + "] which refers to a multi-value metric aggregation"); } parent = null; diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/GeoPointParser.java b/src/main/java/org/elasticsearch/search/aggregations/support/GeoPointParser.java index 35c381ec2a1..b423dd2f755 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/support/GeoPointParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/support/GeoPointParser.java @@ -66,7 +66,8 @@ public class GeoPointParser { lat = parser.doubleValue(); } else { throw new SearchParseException(context, "malformed [" + currentFieldName + "] geo point array in [" + - aggName + "] " + aggType + " aggregation. a geo point array must be of the form [lon, lat]"); + aggName + "] " + aggType + " aggregation. a geo point array must be of the form [lon, lat]", + parser.getTokenLocation()); } } point = new GeoPoint(lat, lon); @@ -88,7 +89,7 @@ public class GeoPointParser { } if (Double.isNaN(lat) || Double.isNaN(lon)) { throw new SearchParseException(context, "malformed [" + currentFieldName + "] geo point object. 
either [lat] or [lon] (or both) are " + - "missing in [" + aggName + "] " + aggType + " aggregation"); + "missing in [" + aggName + "] " + aggType + " aggregation", parser.getTokenLocation()); } point = new GeoPoint(lat, lon); return true; diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregatorFactory.java b/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregatorFactory.java index d88f95642c3..dbefc2e2612 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregatorFactory.java +++ b/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceAggregatorFactory.java @@ -18,10 +18,16 @@ */ package org.elasticsearch.search.aggregations.support; -import org.elasticsearch.search.aggregations.*; +import org.elasticsearch.search.aggregations.AggregationExecutionException; +import org.elasticsearch.search.aggregations.AggregationInitializationException; +import org.elasticsearch.search.aggregations.Aggregator; +import org.elasticsearch.search.aggregations.AggregatorFactories; +import org.elasticsearch.search.aggregations.AggregatorFactory; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.aggregations.support.format.ValueFormat; import java.io.IOException; +import java.util.List; import java.util.Map; /** @@ -49,12 +55,13 @@ public abstract class ValuesSourceAggregatorFactory ext } @Override - public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException { + public Aggregator createInternal(AggregationContext context, Aggregator parent, boolean collectsFromSingleBucket, + List<Reducer> reducers, Map<String, Object> metaData) throws IOException { if (config.unmapped()) { - return createUnmapped(context, parent, metaData); + return createUnmapped(context, parent, reducers, metaData); } VS vs = context.valuesSource(config); - return doCreateInternal(vs, context, parent, collectsFromSingleBucket, metaData); + return doCreateInternal(vs, context, parent, collectsFromSingleBucket, reducers, metaData); } @Override @@ -64,9 +71,11 @@ public abstract class ValuesSourceAggregatorFactory ext } - protected abstract Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, Map<String, Object> metaData) throws IOException; + protected abstract Aggregator createUnmapped(AggregationContext aggregationContext, Aggregator parent, List<Reducer> reducers, + Map<String, Object> metaData) throws IOException; - protected abstract Aggregator doCreateInternal(VS valuesSource, AggregationContext aggregationContext, Aggregator parent, boolean collectsFromSingleBucket, Map<String, Object> metaData) throws IOException; + protected abstract Aggregator doCreateInternal(VS valuesSource, AggregationContext aggregationContext, Aggregator parent, + boolean collectsFromSingleBucket, List<Reducer> reducers, Map<String, Object> metaData) throws IOException; private void resolveValuesSourceConfigFromAncestors(String aggName, AggregatorFactory parent, Class requiredValuesSourceType) { ValuesSourceConfig config; diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java b/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java index aaffbbbfa4b..88c3f64b089 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java +++ b/src/main/java/org/elasticsearch/search/aggregations/support/ValuesSourceParser.java @@ -101,7 +101,8 @@ public class ValuesSourceParser { if 
(targetValueType != null && input.valueType.isNotA(targetValueType)) { throw new SearchParseException(context, aggType.name() + " aggregation [" + aggName + "] was configured with an incompatible value type [" + input.valueType + "]. [" + aggType + - "] aggregation can only work on value of type [" + targetValueType + "]"); + "] aggregation can only work on value of type [" + targetValueType + "]", + parser.getTokenLocation()); } } else if (!scriptParameterParser.token(currentFieldName, token, parser)) { return false; @@ -186,7 +187,7 @@ public class ValuesSourceParser { } private SearchScript createScript() { - return input.script == null ? null : context.scriptService().search(context.lookup(), input.lang, input.script, input.scriptType, ScriptContext.Standard.AGGS, input.params); + return input.script == null ? null : context.scriptService().search(context.lookup(), new Script(input.lang, input.script, input.scriptType, input.params), ScriptContext.Standard.AGGS); } private static ValueFormat resolveFormat(@Nullable String format, @Nullable ValueType valueType) { diff --git a/src/main/java/org/elasticsearch/search/aggregations/support/format/ValueFormatterStreams.java b/src/main/java/org/elasticsearch/search/aggregations/support/format/ValueFormatterStreams.java index 55176b7c7fc..97699c37b21 100644 --- a/src/main/java/org/elasticsearch/search/aggregations/support/format/ValueFormatterStreams.java +++ b/src/main/java/org/elasticsearch/search/aggregations/support/format/ValueFormatterStreams.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.aggregations.support.format; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -39,7 +38,7 @@ public class ValueFormatterStreams { case ValueFormatter.Number.Pattern.ID: formatter = new ValueFormatter.Number.Pattern(); break; case ValueFormatter.GeoHash.ID: formatter = ValueFormatter.GEOHASH; break; case ValueFormatter.BooleanFormatter.ID: formatter = ValueFormatter.BOOLEAN; break; - default: throw new ElasticsearchIllegalArgumentException("Unknown value formatter with id [" + id + "]"); + default: throw new IllegalArgumentException("Unknown value formatter with id [" + id + "]"); } formatter.readFrom(in); return formatter; diff --git a/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 6adfb53fd41..7ca72f5cef6 100644 --- a/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -23,8 +23,9 @@ import com.carrotsearch.hppc.ObjectFloatOpenHashMap; import com.google.common.base.Charsets; import com.google.common.collect.ImmutableList; import com.google.common.collect.Lists; + import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.client.Requests; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; @@ -38,6 +39,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.index.query.FilterBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilder; import 
org.elasticsearch.search.fetch.innerhits.InnerHitsBuilder; import org.elasticsearch.search.fetch.source.FetchSourceContext; import org.elasticsearch.search.highlight.HighlightBuilder; @@ -55,8 +57,9 @@ import java.util.List; import java.util.Map; /** - * A search source builder allowing to easily build search source. Simple construction - * using {@link org.elasticsearch.search.builder.SearchSourceBuilder#searchSource()}. + * A search source builder allowing to easily build search source. Simple + * construction using + * {@link org.elasticsearch.search.builder.SearchSourceBuilder#searchSource()}. * * @see org.elasticsearch.action.search.SearchRequest#source(SearchSourceBuilder) */ @@ -109,7 +112,6 @@ public class SearchSourceBuilder implements ToXContent { private List<AbstractAggregationBuilder> aggregations; private BytesReference aggregationsBinary; - private HighlightBuilder highlightBuilder; private SuggestBuilder suggestBuilder; @@ -123,7 +125,6 @@ public class SearchSourceBuilder implements ToXContent { private String[] stats; - /** * Constructs a new search source builder. */ @@ -190,8 +191,9 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Sets a filter that will be executed after the query has been executed and only has affect on the search hits - * (not aggregations). This filter is always executed as last filtering mechanism. + * Sets a filter that will be executed after the query has been executed and + * only has an effect on the search hits (not aggregations). This filter is + * always executed as the last filtering mechanism. */ public SearchSourceBuilder postFilter(FilterBuilder postFilter) { this.postFilterBuilder = postFilter; @@ -276,8 +278,8 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Should each {@link org.elasticsearch.search.SearchHit} be returned with an - * explanation of the hit (ranking). + * Should each {@link org.elasticsearch.search.SearchHit} be returned with + * an explanation of the hit (ranking). */ public SearchSourceBuilder explain(Boolean explain) { this.explain = explain; @@ -285,8 +287,8 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Should each {@link org.elasticsearch.search.SearchHit} be returned with a version - * associated with it. + * Should each {@link org.elasticsearch.search.SearchHit} be returned with a + * version associated with it. */ public SearchSourceBuilder version(Boolean version) { this.version = version; @@ -310,21 +312,24 @@ public class SearchSourceBuilder implements ToXContent { } /** - * An optional terminate_after to terminate the search after - * collecting terminateAfter documents + * An optional terminate_after to terminate the search after collecting + * terminateAfter documents */ public SearchSourceBuilder terminateAfter(int terminateAfter) { if (terminateAfter <= 0) { - throw new ElasticsearchIllegalArgumentException("terminateAfter must be > 0"); + throw new IllegalArgumentException("terminateAfter must be > 0"); } this.terminateAfter = terminateAfter; return this; } + /** * Adds a sort against the given field name and the sort ordering. * - * @param name The name of the field - * @param order The sort ordering + * @param name + * The name of the field + * @param order + * The sort ordering */ public SearchSourceBuilder sort(String name, SortOrder order) { return sort(SortBuilders.fieldSort(name).order(order)); @@ -333,7 +338,8 @@ public class SearchSourceBuilder implements ToXContent { /** * Add a sort against the given field name. 
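 * (Equivalent to {@code sort(SortBuilders.fieldSort(name))}, which sorts ascending by default.)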
* - * @param name The name of the field to sort by + * @param name + * The name of the field to sort by */ public SearchSourceBuilder sort(String name) { return sort(SortBuilders.fieldSort(name)); @@ -351,8 +357,8 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Applies when sorting, and controls if scores will be tracked as well. Defaults to - * false. + * Applies when sorting, and controls if scores will be tracked as well. + * Defaults to false. */ public SearchSourceBuilder trackScores(boolean trackScores) { this.trackScores = trackScores; @@ -401,6 +407,7 @@ public class SearchSourceBuilder implements ToXContent { /** * Set the rescore window size for rescores that don't specify their window. + * * @param defaultRescoreWindowSize * @return */ @@ -465,7 +472,8 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Indicates whether the response should contain the stored _source for every hit + * Indicates whether the response should contain the stored _source for + * every hit * * @param fetch * @return */ @@ -480,22 +488,33 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard + * Indicate that _source should be returned with every hit, with an + * "include" and/or "exclude" set which can include simple wildcard * elements. * - * @param include An optional include (optionally wildcarded) pattern to filter the returned _source - * @param exclude An optional exclude (optionally wildcarded) pattern to filter the returned _source + * @param include + * An optional include (optionally wildcarded) pattern to filter + * the returned _source + * @param exclude + * An optional exclude (optionally wildcarded) pattern to filter + * the returned _source */ public SearchSourceBuilder fetchSource(@Nullable String include, @Nullable String exclude) { - return fetchSource(include == null ? Strings.EMPTY_ARRAY : new String[]{include}, exclude == null ? Strings.EMPTY_ARRAY : new String[]{exclude}); + return fetchSource(include == null ? Strings.EMPTY_ARRAY : new String[] { include }, exclude == null ? Strings.EMPTY_ARRAY + : new String[] { exclude }); } /** - * Indicate that _source should be returned with every hit, with an "include" and/or "exclude" set which can include simple wildcard + * Indicate that _source should be returned with every hit, with an + * "include" and/or "exclude" set which can include simple wildcard * elements. * - * @param includes An optional list of include (optionally wildcarded) pattern to filter the returned _source - * @param excludes An optional list of exclude (optionally wildcarded) pattern to filter the returned _source + * @param includes + * An optional list of include (optionally wildcarded) patterns to + * filter the returned _source + * @param excludes + * An optional list of exclude (optionally wildcarded) patterns to + * filter the returned _source */ public SearchSourceBuilder fetchSource(@Nullable String[] includes, @Nullable String[] excludes) { fetchSourceContext = new FetchSourceContext(includes, excludes); @@ -511,7 +530,8 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Sets no fields to be loaded, resulting in only id and type to be returned per field. + * Sets no fields to be loaded, resulting in only id and type to be returned + * per hit. 
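+ * (Hedged usage sketch: {@code new SearchSourceBuilder().query(query).noFields()}
+ * yields hits that carry only their id and type.)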
*/ public SearchSourceBuilder noFields() { this.fieldNames = ImmutableList.of(); @@ -519,8 +539,8 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Sets the fields to load and return as part of the search request. If none are specified, - * the source of the document will be returned. + * Sets the fields to load and return as part of the search request. If none + * are specified, the source of the document will be returned. */ public SearchSourceBuilder fields(List<String> fields) { this.fieldNames = fields; @@ -528,8 +548,8 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Adds the fields to load and return as part of the search request. If none are specified, - * the source of the document will be returned. + * Adds the fields to load and return as part of the search request. If none + * are specified, the source of the document will be returned. */ public SearchSourceBuilder fields(String... fields) { if (fieldNames == null) { @@ -542,8 +562,9 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Adds a field to load and return (note, it must be stored) as part of the search request. - * If none are specified, the source of the document will be return. + * Adds a field to load and return (note, it must be stored) as part of the + * search request. If none are specified, the source of the document will be + * returned. */ public SearchSourceBuilder field(String name) { if (fieldNames == null) { @@ -554,7 +575,8 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Adds a field to load from the field data cache and return as part of the search request. + * Adds a field to load from the field data cache and return as part of the + * search request. */ public SearchSourceBuilder fieldDataField(String name) { if (fieldDataFields == null) { @@ -567,8 +589,10 @@ public class SearchSourceBuilder implements ToXContent { /** * Adds a script field under the given name with the provided script. * - * @param name The name of the field - * @param script The script + * @param name + * The name of the field + * @param script + * The script */ public SearchSourceBuilder scriptField(String name, String script) { return scriptField(name, null, script, null); @@ -577,9 +601,12 @@ public class SearchSourceBuilder implements ToXContent { /** * Adds a script field. * - * @param name The name of the field - * @param script The script to execute - * @param params The script parameters + * @param name + * The name of the field + * @param script + * The script to execute + * @param params + * The script parameters */ public SearchSourceBuilder scriptField(String name, String script, Map<String, Object> params) { return scriptField(name, null, script, params); @@ -588,10 +615,14 @@ public class SearchSourceBuilder implements ToXContent { /** * Adds a script field. * - * @param name The name of the field - * @param lang The language of the script - * @param script The script to execute - * @param params The script parameters (can be null) + * @param name + * The name of the field + * @param lang + * The language of the script + * @param script + * The script to execute + * @param params + * The script parameters (can be null) */ public SearchSourceBuilder scriptField(String name, String lang, String script, Map<String, Object> params) { if (scriptFields == null) { @@ -602,10 +633,13 @@ public class SearchSourceBuilder implements ToXContent { } /** - * Sets the boost a specific index will receive when the query is executeed 
+ * Sets the boost a specific index will receive when the query is executed + * against it. * - * @param index The index to apply the boost against - * @param indexBoost The boost to apply to the index + * @param index + * The index to apply the boost against + * @param indexBoost + * The boost to apply to the index */ public SearchSourceBuilder indexBoost(String index, float indexBoost) { if (this.indexBoost == null) { @@ -630,7 +664,7 @@ public class SearchSourceBuilder implements ToXContent { toXContent(builder, ToXContent.EMPTY_PARAMS); return builder.string(); } catch (Exception e) { - return "{ \"error\" : \"" + e.getMessage() + "\"}"; + return "{ \"error\" : \"" + ExceptionsHelper.detailedMessage(e) + "\"}"; } } @@ -648,7 +682,6 @@ public class SearchSourceBuilder implements ToXContent { } } - @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -657,7 +690,7 @@ public class SearchSourceBuilder implements ToXContent { return builder; } - public void innerToXContent(XContentBuilder builder, Params params) throws IOException{ + public void innerToXContent(XContentBuilder builder, Params params) throws IOException { if (from != -1) { builder.field("from", from); } @@ -885,34 +918,4 @@ public class SearchSourceBuilder implements ToXContent { return params; } } - - private static class PartialField { - private final String name; - private final String[] includes; - private final String[] excludes; - - private PartialField(String name, String[] includes, String[] excludes) { - this.name = name; - this.includes = includes; - this.excludes = excludes; - } - - private PartialField(String name, String include, String exclude) { - this.name = name; - this.includes = include == null ? null : new String[]{include}; - this.excludes = exclude == null ? 
null : new String[]{exclude}; - } - - public String name() { - return name; - } - - public String[] includes() { - return includes; - } - - public String[] excludes() { - return excludes; - } - } } diff --git a/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java b/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java index 1894e26277d..4489d98c2c8 100644 --- a/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java +++ b/src/main/java/org/elasticsearch/search/controller/SearchPhaseController.java @@ -21,6 +21,7 @@ package org.elasticsearch.search.controller; import com.carrotsearch.hppc.IntArrayList; import com.carrotsearch.hppc.ObjectObjectOpenHashMap; +import com.google.common.collect.Lists; import org.apache.lucene.index.Term; import org.apache.lucene.search.CollectionStatistics; @@ -40,8 +41,11 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.aggregations.InternalAggregation; import org.elasticsearch.search.aggregations.InternalAggregation.ReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.aggregations.reducers.Reducer; +import org.elasticsearch.search.aggregations.reducers.SiblingReducer; import org.elasticsearch.search.dfs.AggregatedDfs; import org.elasticsearch.search.dfs.DfsSearchResult; import org.elasticsearch.search.fetch.FetchSearchResult; @@ -403,6 +407,19 @@ public class SearchPhaseController extends AbstractComponent { } } + if (aggregations != null) { + List reducers = firstResult.reducers(); + if (reducers != null) { + List newAggs = new ArrayList<>(Lists.transform(aggregations.asList(), Reducer.AGGREGATION_TRANFORM_FUNCTION)); + for (SiblingReducer reducer : reducers) { + InternalAggregation newAgg = reducer.doReduce(new InternalAggregations(newAggs), new ReduceContext(bigArrays, + scriptService)); + newAggs.add(newAgg); + } + aggregations = new InternalAggregations(newAggs); + } + } + InternalSearchHits searchHits = new InternalSearchHits(hits.toArray(new InternalSearchHit[hits.size()]), totalHits, maxScore); return new InternalSearchResponse(searchHits, aggregations, suggest, timedOut, terminatedEarly); diff --git a/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java b/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java index e65be1c314b..94071f8f6bd 100644 --- a/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java +++ b/src/main/java/org/elasticsearch/search/dfs/CachedDfSource.java @@ -36,10 +36,13 @@ public class CachedDfSource extends IndexSearcher { private final int maxDoc; - public CachedDfSource(IndexReader reader, AggregatedDfs aggregatedDfs, Similarity similarity) throws IOException { + public CachedDfSource(IndexReader reader, AggregatedDfs aggregatedDfs, Similarity similarity, + QueryCache queryCache, QueryCachingPolicy queryCachingPolicy) throws IOException { super(reader); this.aggregatedDfs = aggregatedDfs; setSimilarity(similarity); + setQueryCache(queryCache); + setQueryCachingPolicy(queryCachingPolicy); if (aggregatedDfs.maxDoc() > Integer.MAX_VALUE) { maxDoc = Integer.MAX_VALUE; } else { diff --git a/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java b/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java index bf896023803..0037bf322c3 100644 --- 
a/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/FetchPhase.java @@ -21,30 +21,32 @@ package org.elasticsearch.search.fetch; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; - import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; +import org.apache.lucene.search.DocIdSet; import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; import org.apache.lucene.util.BitDocIdSet; import org.apache.lucene.util.BitSet; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.common.text.StringAndBytesText; import org.elasticsearch.common.text.Text; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.support.XContentMapValues; -import org.elasticsearch.index.fieldvisitor.*; +import org.elasticsearch.index.fieldvisitor.AllFieldsVisitor; +import org.elasticsearch.index.fieldvisitor.CustomFieldsVisitor; +import org.elasticsearch.index.fieldvisitor.FieldsVisitor; +import org.elasticsearch.index.fieldvisitor.JustUidFieldsVisitor; +import org.elasticsearch.index.fieldvisitor.UidAndSourceFieldsVisitor; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.FieldMappers; import org.elasticsearch.index.mapper.internal.SourceFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.SearchParseElement; @@ -62,16 +64,10 @@ import org.elasticsearch.search.internal.InternalSearchHit; import org.elasticsearch.search.internal.InternalSearchHitField; import org.elasticsearch.search.internal.InternalSearchHits; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.lookup.LeafSearchLookup; import org.elasticsearch.search.lookup.SourceLookup; import java.io.IOException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; +import java.util.*; import static com.google.common.collect.Lists.newArrayList; import static org.elasticsearch.common.xcontent.XContentFactory.contentBuilder; @@ -144,7 +140,7 @@ public class FetchPhase implements SearchPhase { if (x == null) { // Only fail if we know it is a object field, missing paths / fields shouldn't fail. 
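// (A non-null object mapper here means the requested name is an object field rather
// than a leaf field, which is the only case we can reject with certainty.)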
if (context.smartNameObjectMapper(fieldName) != null) { - throw new ElasticsearchIllegalArgumentException("field [" + fieldName + "] isn't a leaf field"); + throw new IllegalArgumentException("field [" + fieldName + "] isn't a leaf field"); } } else if (x.mapper().fieldType().stored()) { if (fieldNames == null) { @@ -210,7 +206,7 @@ public class FetchPhase implements SearchPhase { private int findRootDocumentIfNested(SearchContext context, LeafReaderContext subReaderContext, int subDocId) throws IOException { if (context.mapperService().hasNested()) { - BitDocIdSet nonNested = context.bitsetFilterCache().getBitDocIdSetFilter(NonNestedDocsFilter.INSTANCE).getDocIdSet(subReaderContext); + BitDocIdSet nonNested = context.bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter()).getDocIdSet(subReaderContext); BitSet bits = nonNested.bits(); if (!bits.get(subDocId)) { return bits.nextSetBit(subDocId); @@ -287,7 +283,7 @@ public class FetchPhase implements SearchPhase { SourceLookup sourceLookup = context.lookup().source(); sourceLookup.setSegmentAndDocument(subReaderContext, nestedSubDocId); - ObjectMapper nestedObjectMapper = documentMapper.findNestedObjectMapper(nestedSubDocId, context.bitsetFilterCache(), subReaderContext); + ObjectMapper nestedObjectMapper = documentMapper.findNestedObjectMapper(nestedSubDocId, context, subReaderContext); assert nestedObjectMapper != null; InternalSearchHit.InternalNestedIdentity nestedIdentity = getInternalNestedIdentity(context, nestedSubDocId, subReaderContext, documentMapper, nestedObjectMapper); @@ -310,7 +306,7 @@ public class FetchPhase implements SearchPhase { // nested field has an object value in the _source. This just means the nested field has just one inner object, which is valid, but uncommon. 
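// e.g. "comments": { "author": "kimchy" } instead of "comments": [ { "author": "kimchy" } ]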
nestedParsedSource = ImmutableList.of((Map<String, Object>) extractedValue); } else { - throw new ElasticsearchIllegalStateException("extracted source isn't an object or an array"); + throw new IllegalStateException("extracted source isn't an object or an array"); } sourceAsMap = nestedParsedSource.get(nested.getOffset()); nested = nested.getChild(); @@ -374,38 +370,56 @@ public class FetchPhase implements SearchPhase { private InternalSearchHit.InternalNestedIdentity getInternalNestedIdentity(SearchContext context, int nestedSubDocId, LeafReaderContext subReaderContext, DocumentMapper documentMapper, ObjectMapper nestedObjectMapper) throws IOException { int currentParent = nestedSubDocId; ObjectMapper nestedParentObjectMapper; + StringBuilder field = new StringBuilder(); + ObjectMapper current = nestedObjectMapper; InternalSearchHit.InternalNestedIdentity nestedIdentity = null; do { - String field; Filter parentFilter; - nestedParentObjectMapper = documentMapper.findParentObjectMapper(nestedObjectMapper); + nestedParentObjectMapper = documentMapper.findParentObjectMapper(current); + if (field.length() != 0) { + field.insert(0, '.'); + } + field.insert(0, current.name()); if (nestedParentObjectMapper != null) { - field = nestedObjectMapper.name(); - if (!nestedParentObjectMapper.nested().isNested()) { - nestedObjectMapper = nestedParentObjectMapper; - // all right, the parent is a normal object field, so this is the best identiy we can give for that: - nestedIdentity = new InternalSearchHit.InternalNestedIdentity(field, 0, nestedIdentity); + if (nestedParentObjectMapper.nested().isNested() == false) { + current = nestedParentObjectMapper; continue; } parentFilter = nestedParentObjectMapper.nestedTypeFilter(); } else { - field = nestedObjectMapper.fullPath(); - parentFilter = NonNestedDocsFilter.INSTANCE; + parentFilter = Queries.newNonNestedFilter(); + } + + Filter childFilter = nestedObjectMapper.nestedTypeFilter(); + if (childFilter == null) { + current = nestedParentObjectMapper; + continue; + } + // We can pass down 'null' as acceptedDocs, because we're fetching a docId that already matched in the query phase. 
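+ // (Nested child docs are indexed in the same block as, and immediately before, their
+ // parent doc; that ordering is what makes the offset counting below work.)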
+ DocIdSet childDocSet = childFilter.getDocIdSet(subReaderContext, null); + if (childDocSet == null) { + current = nestedParentObjectMapper; + continue; + } + DocIdSetIterator childIter = childDocSet.iterator(); + if (childIter == null) { + current = nestedParentObjectMapper; + continue; } BitDocIdSet parentBitSet = context.bitsetFilterCache().getBitDocIdSetFilter(parentFilter).getDocIdSet(subReaderContext); BitSet parentBits = parentBitSet.bits(); + int offset = 0; - BitDocIdSet nestedDocsBitSet = context.bitsetFilterCache().getBitDocIdSetFilter(nestedObjectMapper.nestedTypeFilter()).getDocIdSet(subReaderContext); - BitSet nestedBits = nestedDocsBitSet.bits(); int nextParent = parentBits.nextSetBit(currentParent); - for (int docId = nestedBits.nextSetBit(currentParent + 1); docId < nextParent && docId != DocIdSetIterator.NO_MORE_DOCS; docId = nestedBits.nextSetBit(docId + 1)) { + for (int docId = childIter.advance(currentParent + 1); docId < nextParent && docId != DocIdSetIterator.NO_MORE_DOCS; docId = childIter.nextDoc()) { offset++; } currentParent = nextParent; - nestedObjectMapper = nestedParentObjectMapper; - nestedIdentity = new InternalSearchHit.InternalNestedIdentity(field, offset, nestedIdentity); - } while (nestedParentObjectMapper != null); + current = nestedObjectMapper = nestedParentObjectMapper; + nestedIdentity = new InternalSearchHit.InternalNestedIdentity(field.toString(), offset, nestedIdentity); + field = new StringBuilder(); + } while (current != null); return nestedIdentity; } diff --git a/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java index cbb8615da7f..05cbb4e178a 100644 --- a/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/FetchSubPhase.java @@ -22,8 +22,6 @@ import com.google.common.collect.Maps; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.search.IndexSearcher; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.internal.InternalSearchHit; @@ -42,14 +40,12 @@ public interface FetchSubPhase { private LeafReaderContext readerContext; private int docId; private Map cache; - private IndexSearcher atomicIndexSearcher; public void reset(InternalSearchHit hit, LeafReaderContext context, int docId, IndexReader topLevelReader) { this.hit = hit; this.readerContext = context; this.docId = docId; this.topLevelReader = topLevelReader; - this.atomicIndexSearcher = null; } public InternalSearchHit hit() { @@ -64,15 +60,6 @@ public interface FetchSubPhase { return readerContext; } - public IndexSearcher searcher() { - if (atomicIndexSearcher == null) { - // Use the reader directly otherwise the IndexSearcher assertion will trip because it expects a top level - // reader context. - atomicIndexSearcher = new IndexSearcher(readerContext.reader()); - } - return atomicIndexSearcher; - } - public int docId() { return docId; } @@ -116,9 +103,9 @@ public interface FetchSubPhase { /** * Executes the hit level phase, with a reader and doc id (note, its a low level reader, and the matching doc). 
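 * (After this change, implementations no longer declare a checked exception; failures
 * surface as unchecked exceptions instead.)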
*/ - void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException; + void hitExecute(SearchContext context, HitContext hitContext); boolean hitsExecutionNeeded(SearchContext context); - void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException; + void hitsExecute(SearchContext context, InternalSearchHit[] hits); } diff --git a/src/main/java/org/elasticsearch/search/fetch/explain/ExplainFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/explain/ExplainFetchSubPhase.java index 02bdfbe3cd7..026e421a589 100644 --- a/src/main/java/org/elasticsearch/search/fetch/explain/ExplainFetchSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/explain/ExplainFetchSubPhase.java @@ -47,7 +47,7 @@ public class ExplainFetchSubPhase implements FetchSubPhase { } @Override - public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException { + public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { } @Override @@ -56,7 +56,7 @@ public class ExplainFetchSubPhase implements FetchSubPhase { } @Override - public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException { + public void hitExecute(SearchContext context, HitContext hitContext) { try { final int topLevelDocId = hitContext.hit().docId(); Explanation explanation = context.searcher().explain(context.query(), topLevelDocId); diff --git a/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsFetchSubPhase.java index 2fcf23c3ee3..922d8ce11e7 100644 --- a/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsFetchSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsFetchSubPhase.java @@ -61,7 +61,7 @@ public class FieldDataFieldsFetchSubPhase implements FetchSubPhase { } @Override - public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException { + public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { } @Override @@ -70,7 +70,7 @@ public class FieldDataFieldsFetchSubPhase implements FetchSubPhase { } @Override - public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException { + public void hitExecute(SearchContext context, HitContext hitContext) { for (FieldDataFieldsContext.FieldDataField field : context.fieldDataFields().fields()) { if (hitContext.hit().fieldsOrNull() == null) { hitContext.hit().fields(new HashMap(2)); diff --git a/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsParseElement.java b/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsParseElement.java index c35398152dc..3c45148cdfa 100644 --- a/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsParseElement.java +++ b/src/main/java/org/elasticsearch/search/fetch/fielddata/FieldDataFieldsParseElement.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.fetch.fielddata; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.search.SearchParseElement; import org.elasticsearch.search.internal.SearchContext; @@ -47,7 +46,7 @@ public class FieldDataFieldsParseElement implements SearchParseElement { String fieldName = parser.text(); context.fieldDataFields().add(new 
FieldDataFieldsContext.FieldDataField(fieldName)); } else { - throw new ElasticsearchIllegalStateException("Expected either a VALUE_STRING or an START_ARRAY but got " + token); + throw new IllegalStateException("Expected either a VALUE_STRING or a START_ARRAY but got " + token); } } } diff --git a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java index 557e4d5164a..8f0e2a1799c 100644 --- a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java +++ b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsContext.java @@ -31,12 +31,12 @@ import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.Filter; import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopDocsCollector; import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.TopScoreDocCollector; -import org.apache.lucene.search.TotalHitCountCollector; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.util.BitSet; import org.apache.lucene.util.Bits; @@ -50,7 +50,6 @@ import org.elasticsearch.index.mapper.internal.ParentFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.mapper.object.ObjectMapper; import org.elasticsearch.index.query.ParsedQuery; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.SearchHitField; import org.elasticsearch.search.fetch.FetchSubPhase; import org.elasticsearch.search.internal.FilteredSearchContext; @@ -126,12 +125,12 @@ public final class InnerHitsContext { public TopDocs topDocs(SearchContext context, FetchSubPhase.HitContext hitContext) throws IOException { Filter rawParentFilter; if (parentObjectMapper == null) { - rawParentFilter = NonNestedDocsFilter.INSTANCE; + rawParentFilter = Queries.newNonNestedFilter(); } else { rawParentFilter = parentObjectMapper.nestedTypeFilter(); } BitDocIdSetFilter parentFilter = context.bitsetFilterCache().getBitDocIdSetFilter(rawParentFilter); - Filter childFilter = context.filterCache().cache(childObjectMapper.nestedTypeFilter(), null, context.queryParserService().autoFilterCachePolicy()); + Filter childFilter = childObjectMapper.nestedTypeFilter(); Query q = new FilteredQuery(query, new NestedChildrenFilter(parentFilter, childFilter, hitContext)); if (size() == 0) { @@ -168,6 +167,28 @@ public final class InnerHitsContext { this.atomicReader = hitContext.readerContext().reader(); } + @Override + public boolean equals(Object obj) { + if (super.equals(obj) == false) { + return false; + } + NestedChildrenFilter other = (NestedChildrenFilter) obj; + return parentFilter.equals(other.parentFilter) + && childFilter.equals(other.childFilter) + && docId == other.docId + && atomicReader.getCoreCacheKey() == other.atomicReader.getCoreCacheKey(); + } + + @Override + public int hashCode() { + int hash = super.hashCode(); + hash = 31 * hash + parentFilter.hashCode(); + hash = 31 * hash + childFilter.hashCode(); + hash = 31 * hash + docId; + hash = 31 * hash + atomicReader.getCoreCacheKey().hashCode(); + return hash; + } + @Override public String toString(String field) { return "NestedChildren(parent=" + parentFilter + ",child=" + childFilter + ")"; @@ -285,7 +306,7 @@ 
public final class InnerHitsContext { term = (String) fieldsVisitor.fields().get(ParentFieldMapper.NAME).get(0); } } - Filter filter = Queries.wrap(new TermQuery(new Term(field, term))); // Only include docs that have the current hit as parent + Filter filter = new QueryWrapperFilter(new TermQuery(new Term(field, term))); // Only include docs that have the current hit as parent Filter typeFilter = documentMapper.typeFilter(); // Only include docs that have this inner hits type. BooleanQuery filteredQuery = new BooleanQuery(); diff --git a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsFetchSubPhase.java index 74d0215ccfb..2a36797fcc8 100644 --- a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsFetchSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsFetchSubPhase.java @@ -77,7 +77,7 @@ public class InnerHitsFetchSubPhase implements FetchSubPhase { } @Override - public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException { + public void hitExecute(SearchContext context, HitContext hitContext) { Map results = new HashMap<>(); for (Map.Entry entry : context.innerHits().getInnerHits().entrySet()) { InnerHitsContext.BaseInnerHits innerHits = entry.getValue(); @@ -117,7 +117,7 @@ public class InnerHitsFetchSubPhase implements FetchSubPhase { } @Override - public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException { + public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { } // To get around cyclic dependency issue diff --git a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java index f282439b733..2642b7d862a 100644 --- a/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java +++ b/src/main/java/org/elasticsearch/search/fetch/innerhits/InnerHitsParseElement.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.fetch.innerhits; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; @@ -74,12 +73,12 @@ public class InnerHitsParseElement implements SearchParseElement { Map innerHitsMap = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token != XContentParser.Token.FIELD_NAME) { - throw new ElasticsearchIllegalArgumentException("Unexpected token " + token + " in [inner_hits]: inner_hit definitions must start with the name of the inner_hit."); + throw new IllegalArgumentException("Unexpected token " + token + " in [inner_hits]: inner_hit definitions must start with the name of the inner_hit."); } final String innerHitName = parser.currentName(); token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("Inner hit definition for [" + innerHitName + " starts with a [" + token + "], expected a [" + XContentParser.Token.START_OBJECT + "]."); + throw new IllegalArgumentException("Inner hit definition for [" + innerHitName + "] starts with a [" + token + "], expected a [" + XContentParser.Token.START_OBJECT + "]."); } InnerHitsContext.BaseInnerHits innerHits 
= parseInnerHit(parser, parseContext, searchContext, innerHitName); if (innerHitsMap == null) { @@ -93,12 +92,12 @@ public class InnerHitsParseElement implements SearchParseElement { private InnerHitsContext.BaseInnerHits parseInnerHit(XContentParser parser, QueryParseContext parseContext, SearchContext searchContext, String innerHitName) throws Exception { XContentParser.Token token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new ElasticsearchIllegalArgumentException("Unexpected token " + token + " inside inner hit definition. Either specify [path] or [type] object"); + throw new IllegalArgumentException("Unexpected token " + token + " inside inner hit definition. Either specify [path] or [type] object"); } String fieldName = parser.currentName(); token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("Inner hit definition for [" + innerHitName + " starts with a [" + token + "], expected a [" + XContentParser.Token.START_OBJECT + "]."); + throw new IllegalArgumentException("Inner hit definition for [" + innerHitName + "] starts with a [" + token + "], expected a [" + XContentParser.Token.START_OBJECT + "]."); } String nestedPath = null; @@ -111,16 +110,16 @@ public class InnerHitsParseElement implements SearchParseElement { type = parser.currentName(); break; default: - throw new ElasticsearchIllegalArgumentException("Either path or type object must be defined"); + throw new IllegalArgumentException("Either path or type object must be defined"); } token = parser.nextToken(); if (token != XContentParser.Token.FIELD_NAME) { - throw new ElasticsearchIllegalArgumentException("Unexpected token " + token + " inside inner hit definition. Either specify [path] or [type] object"); + throw new IllegalArgumentException("Unexpected token " + token + " inside inner hit definition. 
Either specify [path] or [type] object"); } fieldName = parser.currentName(); token = parser.nextToken(); if (token != XContentParser.Token.START_OBJECT) { - throw new ElasticsearchIllegalArgumentException("Inner hit definition for [" + innerHitName + " starts with a [" + token + "], expected a [" + XContentParser.Token.START_OBJECT + "]."); + throw new IllegalArgumentException("Inner hit definition for [" + innerHitName + " starts with a [" + token + "], expected a [" + XContentParser.Token.START_OBJECT + "]."); } final InnerHitsContext.BaseInnerHits innerHits; @@ -129,17 +128,17 @@ public class InnerHitsParseElement implements SearchParseElement { } else if (type != null) { innerHits = parseParentChild(parser, parseContext, searchContext, fieldName); } else { - throw new ElasticsearchIllegalArgumentException("Either [path] or [type] must be defined"); + throw new IllegalArgumentException("Either [path] or [type] must be defined"); } // Completely consume all json objects: token = parser.nextToken(); if (token != XContentParser.Token.END_OBJECT) { - throw new ElasticsearchIllegalArgumentException("Expected [" + XContentParser.Token.END_OBJECT + "] token, but got a [" + token + "] token."); + throw new IllegalArgumentException("Expected [" + XContentParser.Token.END_OBJECT + "] token, but got a [" + token + "] token."); } token = parser.nextToken(); if (token != XContentParser.Token.END_OBJECT) { - throw new ElasticsearchIllegalArgumentException("Expected [" + XContentParser.Token.END_OBJECT + "] token, but got a [" + token + "] token."); + throw new IllegalArgumentException("Expected [" + XContentParser.Token.END_OBJECT + "] token, but got a [" + token + "] token."); } return innerHits; @@ -149,7 +148,7 @@ public class InnerHitsParseElement implements SearchParseElement { ParseResult parseResult = parseSubSearchContext(searchContext, parseContext, parser); DocumentMapper documentMapper = searchContext.mapperService().documentMapper(type); if (documentMapper == null) { - throw new ElasticsearchIllegalArgumentException("type [" + type + "] doesn't exist"); + throw new IllegalArgumentException("type [" + type + "] doesn't exist"); } return new InnerHitsContext.ParentChildInnerHits(parseResult.context(), parseResult.query(), parseResult.childInnerHits(), documentMapper); } @@ -157,11 +156,11 @@ public class InnerHitsParseElement implements SearchParseElement { private InnerHitsContext.NestedInnerHits parseNested(XContentParser parser, QueryParseContext parseContext, SearchContext searchContext, String nestedPath) throws Exception { MapperService.SmartNameObjectMapper smartNameObjectMapper = searchContext.smartNameObjectMapper(nestedPath); if (smartNameObjectMapper == null || !smartNameObjectMapper.hasMapper()) { - throw new ElasticsearchIllegalArgumentException("path [" + nestedPath +"] doesn't exist"); + throw new IllegalArgumentException("path [" + nestedPath +"] doesn't exist"); } ObjectMapper childObjectMapper = smartNameObjectMapper.mapper(); if (!childObjectMapper.nested().isNested()) { - throw new ElasticsearchIllegalArgumentException("path [" + nestedPath +"] isn't nested"); + throw new IllegalArgumentException("path [" + nestedPath +"] isn't nested"); } ObjectMapper parentObjectMapper = parseContext.nestedScope().nextLevel(childObjectMapper); ParseResult parseResult = parseSubSearchContext(searchContext, parseContext, parser); diff --git a/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java 
b/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java index 8e6197eae0b..50ed21776ba 100644 --- a/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/matchedqueries/MatchedQueriesFetchSubPhase.java @@ -54,7 +54,7 @@ public class MatchedQueriesFetchSubPhase implements FetchSubPhase { } @Override - public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException { + public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { } @Override @@ -64,7 +64,7 @@ public class MatchedQueriesFetchSubPhase implements FetchSubPhase { } @Override - public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException { + public void hitExecute(SearchContext context, HitContext hitContext) { List matchedQueries = Lists.newArrayListWithCapacity(2); try { diff --git a/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsFetchSubPhase.java index fcfe3a99946..05ec51efa77 100644 --- a/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsFetchSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsFetchSubPhase.java @@ -21,7 +21,6 @@ package org.elasticsearch.search.fetch.script; import com.google.common.collect.ImmutableMap; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.script.LeafSearchScript; import org.elasticsearch.search.SearchHitField; @@ -62,7 +61,7 @@ public class ScriptFieldsFetchSubPhase implements FetchSubPhase { } @Override - public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException { + public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { } @Override @@ -71,13 +70,13 @@ public class ScriptFieldsFetchSubPhase implements FetchSubPhase { } @Override - public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException { + public void hitExecute(SearchContext context, HitContext hitContext) { for (ScriptFieldsContext.ScriptField scriptField : context.scriptFields().fields()) { LeafSearchScript leafScript; try { leafScript = scriptField.script().getLeafSearchScript(hitContext.readerContext()); } catch (IOException e1) { - throw new ElasticsearchIllegalStateException("Failed to load script", e1); + throw new IllegalStateException("Failed to load script", e1); } leafScript.setDocument(hitContext.docId()); diff --git a/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java index 9a38b4b6358..10614656f09 100644 --- a/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java +++ b/src/main/java/org/elasticsearch/search/fetch/script/ScriptFieldsParseElement.java @@ -76,7 +76,7 @@ public class ScriptFieldsParseElement implements SearchParseElement { script = scriptValue.script(); scriptType = scriptValue.scriptType(); } - SearchScript searchScript = context.scriptService().search(context.lookup(), scriptParameterParser.lang(), script, scriptType, ScriptContext.Standard.SEARCH, params); + SearchScript searchScript = context.scriptService().search(context.lookup(), new Script(scriptParameterParser.lang(), 
script, scriptType, params), ScriptContext.Standard.SEARCH); context.scriptFields().add(new ScriptFieldsContext.ScriptField(fieldName, searchScript, ignoreException)); } } diff --git a/src/main/java/org/elasticsearch/search/fetch/source/FetchSourceSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/source/FetchSourceSubPhase.java index 76106861c80..445d6801e59 100644 --- a/src/main/java/org/elasticsearch/search/fetch/source/FetchSourceSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/source/FetchSourceSubPhase.java @@ -56,7 +56,7 @@ public class FetchSourceSubPhase implements FetchSubPhase { } @Override - public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException { + public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { } @Override @@ -65,7 +65,7 @@ public class FetchSourceSubPhase implements FetchSubPhase { } @Override - public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException { + public void hitExecute(SearchContext context, HitContext hitContext) { FetchSourceContext fetchSourceContext = context.fetchSourceContext(); assert fetchSourceContext.fetchSource(); if (fetchSourceContext.includes().length == 0 && fetchSourceContext.excludes().length == 0) { diff --git a/src/main/java/org/elasticsearch/search/fetch/version/VersionFetchSubPhase.java b/src/main/java/org/elasticsearch/search/fetch/version/VersionFetchSubPhase.java index 1c2f7d4c846..6a5264dd625 100644 --- a/src/main/java/org/elasticsearch/search/fetch/version/VersionFetchSubPhase.java +++ b/src/main/java/org/elasticsearch/search/fetch/version/VersionFetchSubPhase.java @@ -49,7 +49,7 @@ public class VersionFetchSubPhase implements FetchSubPhase { } @Override - public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException { + public void hitsExecute(SearchContext context, InternalSearchHit[] hits) { } @Override @@ -58,7 +58,7 @@ public class VersionFetchSubPhase implements FetchSubPhase { } @Override - public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException { + public void hitExecute(SearchContext context, HitContext hitContext) { // it might make sense to cache the TermDocs on a shared fetch context and just skip here) // it is going to mean we work on the high level multi reader and not the lower level reader as is // the case below... 
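The change repeated across all of these fetch sub-phases is the removal of the checked throws ElasticsearchException clause from hitExecute and hitsExecute. Under the new signatures, a minimal no-op sub-phase would look roughly like the sketch below; the class name is hypothetical and the method set is inferred from the implementations shown in this diff, so treat it as an illustration rather than a canonical template.

    package org.elasticsearch.search.fetch;

    import com.google.common.collect.ImmutableMap;

    import org.elasticsearch.search.SearchParseElement;
    import org.elasticsearch.search.internal.InternalSearchHit;
    import org.elasticsearch.search.internal.SearchContext;

    import java.util.Map;

    /** Hypothetical no-op sub-phase illustrating the exception-free signatures. */
    public class NoOpFetchSubPhase implements FetchSubPhase {

        @Override
        public Map<String, ? extends SearchParseElement> parseElements() {
            return ImmutableMap.of(); // nothing to parse from the request body
        }

        @Override
        public boolean hitExecutionNeeded(SearchContext context) {
            return false; // skip the per-hit callback entirely
        }

        @Override
        public void hitExecute(SearchContext context, HitContext hitContext) {
            // no checked throws clause any more; unchecked exceptions still propagate
        }

        @Override
        public boolean hitsExecutionNeeded(SearchContext context) {
            return false;
        }

        @Override
        public void hitsExecute(SearchContext context, InternalSearchHit[] hits) {
        }
    }
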
diff --git a/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java b/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java
index 6d34c7a58ee..161cdd490f1 100644
--- a/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java
+++ b/src/main/java/org/elasticsearch/search/highlight/FastVectorHighlighter.java
@@ -22,7 +22,6 @@ import com.google.common.collect.Maps;
 import org.apache.lucene.search.highlight.Encoder;
 import org.apache.lucene.search.vectorhighlight.*;
 import org.apache.lucene.search.vectorhighlight.FieldPhraseList.WeightedPhraseInfo;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.text.StringText;
@@ -65,7 +64,7 @@ public class FastVectorHighlighter implements Highlighter {
         FieldMapper mapper = highlighterContext.mapper;
         if (!(mapper.fieldType().storeTermVectors() && mapper.fieldType().storeTermVectorOffsets() && mapper.fieldType().storeTermVectorPositions())) {
-            throw new ElasticsearchIllegalArgumentException("the field [" + highlighterContext.fieldName + "] should be indexed with term vector with position offsets to be used with fast vector highlighter");
+            throw new IllegalArgumentException("the field [" + highlighterContext.fieldName + "] should be indexed with term vector with position offsets to be used with fast vector highlighter");
         }
 
         Encoder encoder = field.fieldOptions().encoder().equals("html") ? HighlightUtils.Encoders.HTML : HighlightUtils.Encoders.DEFAULT;
diff --git a/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java b/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java
index 042146addca..cd3c12591f7 100644
--- a/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java
+++ b/src/main/java/org/elasticsearch/search/highlight/HighlightPhase.java
@@ -23,14 +23,12 @@ import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import org.apache.lucene.index.IndexOptions;
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.FieldMapper;
-import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.internal.SourceFieldMapper;
 import org.elasticsearch.search.SearchParseElement;
 import org.elasticsearch.search.fetch.FetchSubPhase;
@@ -66,7 +64,7 @@ public class HighlightPhase extends AbstractComponent implements FetchSubPhase {
     }
 
     @Override
-    public void hitsExecute(SearchContext context, InternalSearchHit[] hits) throws ElasticsearchException {
+    public void hitsExecute(SearchContext context, InternalSearchHit[] hits) {
     }
 
     @Override
@@ -75,7 +73,7 @@ public class HighlightPhase extends AbstractComponent implements FetchSubPhase {
     }
 
     @Override
-    public void hitExecute(SearchContext context, HitContext hitContext) throws ElasticsearchException {
+    public void hitExecute(SearchContext context, HitContext hitContext) {
         Map highlightFields = newHashMap();
         for (SearchContextHighlight.Field field : context.highlight().fields()) {
             List fieldNamesToHighlight;
@@ -89,7 +87,7 @@ public class HighlightPhase extends AbstractComponent implements FetchSubPhase {
                 if (context.highlight().forceSource(field)) {
                     SourceFieldMapper sourceFieldMapper = context.mapperService().documentMapper(hitContext.hit().type()).sourceMapper();
                     if (!sourceFieldMapper.enabled()) {
-                        throw new ElasticsearchIllegalArgumentException("source is forced for fields " + fieldNamesToHighlight + " but type [" + hitContext.hit().type() + "] has disabled _source");
+                        throw new IllegalArgumentException("source is forced for fields " + fieldNamesToHighlight + " but type [" + hitContext.hit().type() + "] has disabled _source");
                     }
                 }
@@ -113,7 +111,7 @@ public class HighlightPhase extends AbstractComponent implements FetchSubPhase {
 
                 Highlighter highlighter = highlighters.get(highlighterType);
                 if (highlighter == null) {
-                    throw new ElasticsearchIllegalArgumentException("unknown highlighter type [" + highlighterType + "] for the field [" + fieldName + "]");
+                    throw new IllegalArgumentException("unknown highlighter type [" + highlighterType + "] for the field [" + fieldName + "]");
                 }
 
                 HighlighterContext.HighlightQuery highlightQuery;
diff --git a/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java b/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java
index 24adef3bba5..3613327c679 100644
--- a/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java
+++ b/src/main/java/org/elasticsearch/search/highlight/HighlighterParseElement.java
@@ -21,8 +21,8 @@ package org.elasticsearch.search.highlight;
 
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
+
 import org.apache.lucene.search.vectorhighlight.SimpleBoundaryScanner;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.query.IndexQueryParserService;
@@ -70,8 +70,8 @@ public class HighlighterParseElement implements SearchParseElement {
     public void parse(XContentParser parser, SearchContext context) throws Exception {
         try {
             context.highlight(parse(parser, context.queryParserService()));
-        } catch (ElasticsearchIllegalArgumentException ex) {
-            throw new SearchParseException(context, "Error while trying to parse Highlighter element in request");
+        } catch (IllegalArgumentException ex) {
+            throw new SearchParseException(context, "Error while trying to parse Highlighter element in request", parser.getTokenLocation());
         }
     }
@@ -110,7 +110,7 @@ public class HighlighterParseElement implements SearchParseElement {
                         while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                             if (token == XContentParser.Token.FIELD_NAME) {
                                 if (highlightFieldName != null) {
-                                    throw new ElasticsearchIllegalArgumentException("If highlighter fields is an array it must contain objects containing a single field");
+                                    throw new IllegalArgumentException("If highlighter fields is an array it must contain objects containing a single field");
                                 }
                                 highlightFieldName = parser.currentName();
                             } else if (token == XContentParser.Token.START_OBJECT) {
@@ -118,7 +118,7 @@ public class HighlighterParseElement implements SearchParseElement {
                             }
                         }
                     } else {
-                        throw new ElasticsearchIllegalArgumentException("If highlighter fields is an array it must contain objects containing a single field");
+                        throw new IllegalArgumentException("If highlighter fields is an array it must contain objects containing a single field");
                     }
                 }
             }
@@ -181,7 +181,7 @@ public class HighlighterParseElement implements SearchParseElement {
         final SearchContextHighlight.FieldOptions globalOptions = globalOptionsBuilder.build();
 
         if (globalOptions.preTags() != null && globalOptions.postTags() == null) {
-            throw new ElasticsearchIllegalArgumentException("Highlighter global preTags are set, but global postTags are not set");
+            throw new IllegalArgumentException("Highlighter global preTags are set, but global postTags are not set");
         }
 
         final List fields = Lists.newArrayList();
diff --git a/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java b/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java
index e246fd214b9..d7e3580ab4d 100644
--- a/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java
+++ b/src/main/java/org/elasticsearch/search/highlight/PlainHighlighter.java
@@ -26,7 +26,6 @@ import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.highlight.*;
 import org.apache.lucene.util.CollectionUtil;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.text.StringText;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.index.mapper.FieldMapper;
@@ -83,7 +82,7 @@ public class PlainHighlighter implements Highlighter {
             } else if ("span".equals(field.fieldOptions().fragmenter())) {
                 fragmenter = new SimpleSpanFragmenter(queryScorer, field.fieldOptions().fragmentCharSize());
             } else {
-                throw new ElasticsearchIllegalArgumentException("unknown fragmenter option [" + field.fieldOptions().fragmenter() + "] for the field [" + highlighterContext.fieldName + "]");
+                throw new IllegalArgumentException("unknown fragmenter option [" + field.fieldOptions().fragmenter() + "] for the field [" + highlighterContext.fieldName + "]");
             }
             Formatter formatter = new SimpleHTMLFormatter(field.fieldOptions().preTags()[0], field.fieldOptions().postTags()[0]);
diff --git a/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java b/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java
index 5da34fe5d5e..f30a0545d95 100644
--- a/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java
+++ b/src/main/java/org/elasticsearch/search/highlight/PostingsHighlighter.java
@@ -41,7 +41,6 @@ import org.apache.lucene.search.postingshighlight.WholeBreakIterator;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CollectionUtil;
 import org.apache.lucene.util.UnicodeUtil;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.text.StringText;
@@ -75,7 +74,7 @@ public class PostingsHighlighter implements Highlighter {
         FieldMapper fieldMapper = highlighterContext.mapper;
         SearchContextHighlight.Field field = highlighterContext.field;
         if (fieldMapper.fieldType().indexOptions() != IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS) {
-            throw new ElasticsearchIllegalArgumentException("the field [" + highlighterContext.fieldName + "] should be indexed with positions and offsets in the postings list to be used with postings highlighter");
+            throw new IllegalArgumentException("the field [" + highlighterContext.fieldName + "] should be indexed with positions and offsets in the postings list to be used with postings highlighter");
         }
 
         SearchContext context = highlighterContext.context;
@@ -123,7 +122,7 @@ public class PostingsHighlighter implements Highlighter {
                 //we highlight every value separately calling the highlight method multiple times, only if we need to have back a snippet per value (whole value)
                 int values = mergeValues ? 1 : textsToHighlight.size();
                 for (int i = 0; i < values; i++) {
-                    Snippet[] fieldSnippets = highlighter.highlightDoc(fieldMapper.names().indexName(), mapperHighlighterEntry.filteredQueryTerms, hitContext.searcher(), hitContext.docId(), numberOfFragments);
+                    Snippet[] fieldSnippets = highlighter.highlightDoc(fieldMapper.names().indexName(), mapperHighlighterEntry.filteredQueryTerms, hitContext.reader(), hitContext.docId(), numberOfFragments);
                     if (fieldSnippets != null) {
                         for (Snippet fieldSnippet : fieldSnippets) {
                             if (Strings.hasText(fieldSnippet.getText())) {
diff --git a/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java b/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java
index 0e38c150030..482e3ef9153 100644
--- a/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java
+++ b/src/main/java/org/elasticsearch/search/internal/ContextIndexSearcher.java
@@ -20,9 +20,10 @@ package org.elasticsearch.search.internal;
 
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.search.BooleanClause.Occur;
+import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Collector;
 import org.apache.lucene.search.Explanation;
-import org.apache.lucene.search.FilteredQuery;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MultiCollector;
 import org.apache.lucene.search.Query;
@@ -151,7 +152,8 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
             // this will only get applied to the actual search collector and not
             // to any scoped collectors, also, it will only be applied to the main collector
             // since that is where the filter should only work
-            collector = new FilteredCollector(collector, searchContext.parsedPostFilter().filter());
+            final Weight filterWeight = createNormalizedWeight(searchContext.parsedPostFilter().filter(), false);
+            collector = new FilteredCollector(collector, filterWeight);
         }
         if (queryCollectors != null && !queryCollectors.isEmpty()) {
             ArrayList allCollectors = new ArrayList<>(queryCollectors.values());
@@ -194,7 +196,9 @@ public class ContextIndexSearcher extends IndexSearcher implements Releasable {
             if (searchContext.aliasFilter() == null) {
                 return super.explain(query, doc);
             }
-            FilteredQuery filteredQuery = new FilteredQuery(query, searchContext.aliasFilter());
+            BooleanQuery filteredQuery = new BooleanQuery();
+            filteredQuery.add(query, Occur.MUST);
+            filteredQuery.add(searchContext.aliasFilter(), Occur.FILTER);
             return super.explain(filteredQuery, doc);
         } finally {
             searchContext.clearReleasables(Lifetime.COLLECTION);
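
Editor's note: the two hunks above replace Lucene's FilteredQuery with a BooleanQuery carrying a FILTER clause, which restricts matches like MUST but contributes nothing to the score. A minimal sketch of the pattern against the Lucene 5.x API used in this patch (names are illustrative, not from the patch):

    import org.apache.lucene.search.BooleanClause.Occur;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.Query;

    final class FilteredQueries {
        // Combine a scoring query with a non-scoring restriction.
        static Query filtered(Query mainQuery, Query filter) {
            BooleanQuery bq = new BooleanQuery();
            bq.add(mainQuery, Occur.MUST);  // matches and scores
            bq.add(filter, Occur.FILTER);   // matches only, no score contribution
            return bq;
        }
    }
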
diff --git a/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java b/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java
index e4b7070b8c3..cd50594dc0e 100644
--- a/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java
+++ b/src/main/java/org/elasticsearch/search/internal/DefaultSearchContext.java
@@ -26,7 +26,6 @@ import org.apache.lucene.search.BooleanClause.Occur;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.Filter;
-import org.apache.lucene.search.FilteredQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.search.ScoreDoc;
@@ -44,7 +43,6 @@ import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.analysis.AnalysisService;
 import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
-import org.elasticsearch.index.cache.filter.FilterCache;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.FieldMapper;
@@ -207,7 +205,7 @@ public class DefaultSearchContext extends SearchContext {
     }
 
     @Override
-    public void doClose() throws ElasticsearchException {
+    public void doClose() {
         if (scanContext != null) {
             scanContext.clear();
         }
@@ -235,14 +233,17 @@ public class DefaultSearchContext extends SearchContext {
         if (queryBoost() != 1.0f) {
             parsedQuery(new ParsedQuery(new FunctionScoreQuery(query(), new BoostScoreFunction(queryBoost)), parsedQuery()));
         }
-        Filter searchFilter = searchFilter(types());
+        Query searchFilter = searchFilter(types());
         if (searchFilter != null) {
             if (Queries.isConstantMatchAllQuery(query())) {
                 Query q = new ConstantScoreQuery(searchFilter);
                 q.setBoost(query().getBoost());
                 parsedQuery(new ParsedQuery(q, parsedQuery()));
             } else {
-                parsedQuery(new ParsedQuery(new FilteredQuery(query(), searchFilter), parsedQuery()));
+                BooleanQuery filtered = new BooleanQuery();
+                filtered.add(query(), Occur.MUST);
+                filtered.add(searchFilter, Occur.FILTER);
+                parsedQuery(new ParsedQuery(filtered, parsedQuery()));
             }
         }
     }
@@ -255,12 +256,12 @@ public class DefaultSearchContext extends SearchContext {
         }
         BooleanQuery bq = new BooleanQuery();
         if (filter != null) {
-            bq.add(filterCache().cache(filter, null, indexService.queryParserService().autoFilterCachePolicy()), Occur.MUST);
+            bq.add(filter, Occur.MUST);
         }
         if (aliasFilter != null) {
             bq.add(aliasFilter, Occur.MUST);
         }
-        return Queries.wrap(bq);
+        return new QueryWrapperFilter(bq);
     }
 
     @Override
@@ -480,11 +481,6 @@ public class DefaultSearchContext extends SearchContext {
         return bigArrays;
     }
 
-    @Override
-    public FilterCache filterCache() {
-        return indexService.cache().filter();
-    }
-
     @Override
     public BitsetFilterCache bitsetFilterCache() {
         return indexService.bitsetFilterCache();
diff --git a/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java b/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java
index 3a6a48531f0..7fe5373b5e5 100644
--- a/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java
+++ b/src/main/java/org/elasticsearch/search/internal/FilteredSearchContext.java
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.search.internal;
 
-import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
@@ -79,7 +78,7 @@ public abstract class FilteredSearchContext extends SearchContext {
     }
 
     @Override
-    public Filter searchFilter(String[] types) {
+    public Query searchFilter(String[] types) {
         return in.searchFilter(types);
     }
@@ -288,11 +287,6 @@ public abstract class FilteredSearchContext extends SearchContext {
         return in.bigArrays();
     }
 
-    @Override
-    public FilterCache filterCache() {
-        return in.filterCache();
-    }
-
     @Override
     public BitsetFilterCache bitsetFilterCache() {
         return in.bitsetFilterCache();
@@ -364,7 +358,7 @@ public abstract class FilteredSearchContext extends SearchContext {
     }
 
     @Override
-    public Filter aliasFilter() {
+    public Query aliasFilter() {
         return in.aliasFilter();
     }
diff --git a/src/main/java/org/elasticsearch/search/internal/SearchContext.java b/src/main/java/org/elasticsearch/search/internal/SearchContext.java
index f5377c98040..1ae74abaaf9 100644
--- a/src/main/java/org/elasticsearch/search/internal/SearchContext.java
+++ b/src/main/java/org/elasticsearch/search/internal/SearchContext.java
@@ -21,7 +21,7 @@ package org.elasticsearch.search.internal;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Multimap;
 import com.google.common.collect.MultimapBuilder;
-import org.apache.lucene.search.Filter;
+
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.Sort;
@@ -34,7 +34,6 @@ import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.index.analysis.AnalysisService;
 import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
-import org.elasticsearch.index.cache.filter.FilterCache;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.FieldMappers;
@@ -43,7 +42,6 @@ import org.elasticsearch.index.query.IndexQueryParserService;
 import org.elasticsearch.index.query.ParsedFilter;
 import org.elasticsearch.index.query.ParsedQuery;
 import org.elasticsearch.index.query.QueryParseContext;
-import org.elasticsearch.index.query.support.NestedScope;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.script.ScriptService;
@@ -112,7 +110,7 @@ public abstract class SearchContext implements Releasable {
     */
    public abstract void preProcess();
 
-    public abstract Filter searchFilter(String[] types);
+    public abstract Query searchFilter(String[] types);
 
     public abstract long id();
@@ -213,8 +211,6 @@ public abstract class SearchContext implements Releasable {
     public abstract BigArrays bigArrays();
 
-    public abstract FilterCache filterCache();
-
     public abstract BitsetFilterCache bitsetFilterCache();
 
     public abstract IndexFieldDataService fieldData();
@@ -243,7 +239,7 @@ public abstract class SearchContext implements Releasable {
     public abstract ParsedFilter parsedPostFilter();
 
-    public abstract Filter aliasFilter();
+    public abstract Query aliasFilter();
 
     public abstract SearchContext parsedQuery(ParsedQuery query);
diff --git a/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java b/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java
index bbdeece6ff0..7ae314e4756 100644
--- a/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java
+++ b/src/main/java/org/elasticsearch/search/lookup/LeafDocLookup.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.search.lookup;
 
 import com.google.common.collect.Maps;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.index.fielddata.IndexFieldDataService;
 import org.elasticsearch.index.fielddata.ScriptDocValues;
@@ -76,7 +75,7 @@ public class LeafDocLookup implements Map {
         if (scriptValues == null) {
             FieldMapper mapper = mapperService.smartNameFieldMapper(fieldName, types);
             if (mapper == null) {
-                throw new ElasticsearchIllegalArgumentException("No field found for [" + fieldName + "] in mapping with types " + Arrays.toString(types) + "");
+                throw new IllegalArgumentException("No field found for [" + fieldName + "] in mapping with types " + Arrays.toString(types) + "");
             }
             scriptValues = fieldDataService.getForField(mapper).load(reader).getScriptValues();
             localCacheFieldData.put(fieldName, scriptValues);
diff --git a/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java b/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java
index af246b8ba15..52e0872742a 100644
--- a/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java
+++ b/src/main/java/org/elasticsearch/search/lookup/LeafFieldsLookup.java
@@ -22,7 +22,6 @@ import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
 
 import org.apache.lucene.index.LeafReader;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.index.fieldvisitor.SingleFieldsVisitor;
@@ -139,7 +138,7 @@ public class LeafFieldsLookup implements Map {
         if (data == null) {
             FieldMapper mapper = mapperService.smartNameFieldMapper(name, types);
             if (mapper == null) {
-                throw new ElasticsearchIllegalArgumentException("No field found for [" + name + "] in mapping with types " + Arrays.toString(types) + "");
+                throw new IllegalArgumentException("No field found for [" + name + "] in mapping with types " + Arrays.toString(types) + "");
             }
             data = new FieldLookup(mapper);
             cachedFieldData.put(name, data);
diff --git a/src/main/java/org/elasticsearch/search/lookup/LeafIndexLookup.java b/src/main/java/org/elasticsearch/search/lookup/LeafIndexLookup.java
index 4d8f618ba79..5e6a6df5fc0 100644
--- a/src/main/java/org/elasticsearch/search/lookup/LeafIndexLookup.java
+++ b/src/main/java/org/elasticsearch/search/lookup/LeafIndexLookup.java
@@ -18,12 +18,12 @@
  */
 package org.elasticsearch.search.lookup;
 
-import org.apache.lucene.index.CompositeReader;
 import org.apache.lucene.index.Fields;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.IndexReaderContext;
 import org.apache.lucene.index.LeafReader;
 import org.apache.lucene.index.LeafReaderContext;
+import org.apache.lucene.index.ReaderUtil;
 import org.apache.lucene.search.IndexSearcher;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.util.MinimalMap;
@@ -40,14 +40,11 @@ public class LeafIndexLookup extends MinimalMap {
     // The parent reader from which we can get proper field and term
     // statistics
-    private final CompositeReader parentReader;
+    private final IndexReader parentReader;
 
     // we need this later to get the field and term statistics of the shard
     private final IndexSearcher indexSearcher;
 
-    // we need this later to get the term statistics of the shard
-    private final IndexReaderContext indexReaderContext;
-
     // current docId
     private int docId = -1;
@@ -90,15 +87,9 @@ public class LeafIndexLookup extends MinimalMap {
     public LeafIndexLookup(LeafReaderContext ctx) {
         reader = ctx.reader();
-        if (ctx.parent != null) {
-            parentReader = ctx.parent.reader();
-            indexSearcher = new IndexSearcher(parentReader);
-            indexReaderContext = ctx.parent;
-        } else {
-            parentReader = null;
-            indexSearcher = null;
-            indexReaderContext = null;
-        }
+        parentReader = ReaderUtil.getTopLevelContext(ctx).reader();
+        indexSearcher = new IndexSearcher(parentReader);
+        indexSearcher.setQueryCache(null);
     }
 
     public void setDocument(int docId) {
@@ -175,13 +166,10 @@ public class LeafIndexLookup extends MinimalMap {
     }
 
     public IndexSearcher getIndexSearcher() {
-        if (indexSearcher == null) {
-            return new IndexSearcher(reader);
-        }
         return indexSearcher;
     }
 
     public IndexReaderContext getReaderContext() {
-        return indexReaderContext;
+        return getParentReader().getContext();
     }
 }
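
Editor's note: the LeafIndexLookup rewrite above drops the null-parent special case by always resolving the top-level reader, and turns off the query cache on the statistics searcher. A hedged sketch of that lookup (Lucene API as used in the patch; the wrapper class is hypothetical):

    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.LeafReaderContext;
    import org.apache.lucene.index.ReaderUtil;
    import org.apache.lucene.search.IndexSearcher;

    final class TopLevelStats {
        // Walk up from a leaf to its top-level context; for a standalone leaf
        // this returns the leaf's own context, so no null branch is needed.
        static IndexSearcher searcherFor(LeafReaderContext ctx) {
            IndexReader top = ReaderUtil.getTopLevelContext(ctx).reader();
            IndexSearcher searcher = new IndexSearcher(top);
            searcher.setQueryCache(null); // used only for term statistics; skip caching
            return searcher;
        }
    }
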
diff --git a/src/main/java/org/elasticsearch/search/query/FromParseElement.java b/src/main/java/org/elasticsearch/search/query/FromParseElement.java
index 13e58caa471..21063a93d35 100644
--- a/src/main/java/org/elasticsearch/search/query/FromParseElement.java
+++ b/src/main/java/org/elasticsearch/search/query/FromParseElement.java
@@ -35,7 +35,8 @@ public class FromParseElement implements SearchParseElement {
         if (token.isValue()) {
             int from = parser.intValue();
             if (from < 0) {
-                throw new SearchParseException(context, "from is set to [" + from + "] and is expected to be higher or equal to 0");
+                throw new SearchParseException(context, "from is set to [" + from + "] and is expected to be greater than or equal to 0",
+                        parser.getTokenLocation());
             }
             context.from(from);
         }
diff --git a/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java b/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
index 50167676cc7..e45006b2c32 100644
--- a/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
+++ b/src/main/java/org/elasticsearch/search/query/QuerySearchResult.java
@@ -20,15 +20,20 @@
 package org.elasticsearch.search.query;
 
 import org.apache.lucene.search.TopDocs;
-import org.elasticsearch.Version;
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.search.SearchShardTarget;
 import org.elasticsearch.search.aggregations.Aggregations;
 import org.elasticsearch.search.aggregations.InternalAggregations;
+import org.elasticsearch.search.aggregations.reducers.Reducer;
+import org.elasticsearch.search.aggregations.reducers.ReducerStreams;
+import org.elasticsearch.search.aggregations.reducers.SiblingReducer;
 import org.elasticsearch.search.suggest.Suggest;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 
 import static org.elasticsearch.common.lucene.Lucene.readTopDocs;
 import static org.elasticsearch.common.lucene.Lucene.writeTopDocs;
@@ -44,6 +49,7 @@ public class QuerySearchResult extends QuerySearchResultProvider {
     private int size;
     private TopDocs topDocs;
     private InternalAggregations aggregations;
+    private List<SiblingReducer> reducers;
     private Suggest suggest;
     private boolean searchTimedOut;
     private Boolean terminatedEarly = null;
@@ -114,6 +120,14 @@ public class QuerySearchResult extends QuerySearchResultProvider {
         this.aggregations = aggregations;
     }
 
+    public List<SiblingReducer> reducers() {
+        return reducers;
+    }
+
+    public void reducers(List<SiblingReducer> reducers) {
+        this.reducers = reducers;
+    }
+
     public Suggest suggest() {
         return suggest;
     }
@@ -162,6 +176,16 @@ public class QuerySearchResult extends QuerySearchResultProvider {
         if (in.readBoolean()) {
             aggregations = InternalAggregations.readAggregations(in);
         }
+        if (in.readBoolean()) {
+            int size = in.readVInt();
+            List<SiblingReducer> reducers = new ArrayList<>(size);
+            for (int i = 0; i < size; i++) {
+                BytesReference type = in.readBytesReference();
+                Reducer reducer = ReducerStreams.stream(type).readResult(in);
+                reducers.add((SiblingReducer) reducer);
+            }
+            this.reducers = reducers;
+        }
         if (in.readBoolean()) {
             suggest = Suggest.readSuggest(Suggest.Fields.SUGGEST, in);
         }
@@ -187,6 +211,16 @@ public class QuerySearchResult extends QuerySearchResultProvider {
             out.writeBoolean(true);
             aggregations.writeTo(out);
         }
+        if (reducers == null) {
+            out.writeBoolean(false);
+        } else {
+            out.writeBoolean(true);
+            out.writeVInt(reducers.size());
+            for (Reducer reducer : reducers) {
+                out.writeBytesReference(reducer.type().stream());
+                reducer.writeTo(out);
+            }
+        }
         if (suggest == null) {
             out.writeBoolean(false);
         } else {
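
Editor's note on the QuerySearchResult change above: the reducers list travels over the wire with the usual optional-list pattern, i.e. a presence flag, a VInt element count, and each element prefixed by its stream type id; readFrom mirrors writeTo exactly (readBoolean, then readVInt, then one type lookup plus read per element). A hedged, self-contained sketch of the write side; the stream interfaces here are hypothetical stand-ins for the ES ones:

    import java.io.IOException;
    import java.util.List;

    interface DataOut {
        void writeBoolean(boolean b) throws IOException;
        void writeVInt(int i) throws IOException;
    }

    interface Writeable {
        void writeTo(DataOut out) throws IOException;
    }

    final class OptionalListDemo {
        // Presence flag, element count, then each element in order.
        static void writeOptionalList(DataOut out, List<? extends Writeable> items) throws IOException {
            if (items == null) {
                out.writeBoolean(false);
            } else {
                out.writeBoolean(true);
                out.writeVInt(items.size());
                for (Writeable item : items) {
                    item.writeTo(out); // real code writes a type id first, then the payload
                }
            }
        }
    }
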
diff --git a/src/main/java/org/elasticsearch/search/query/SizeParseElement.java b/src/main/java/org/elasticsearch/search/query/SizeParseElement.java
index b729ea4cdb2..5560ec939c4 100644
--- a/src/main/java/org/elasticsearch/search/query/SizeParseElement.java
+++ b/src/main/java/org/elasticsearch/search/query/SizeParseElement.java
@@ -35,7 +35,8 @@ public class SizeParseElement implements SearchParseElement {
         if (token.isValue()) {
             int size = parser.intValue();
             if (size < 0) {
-                throw new SearchParseException(context, "size is set to [" + size + "] and is expected to be higher or equal to 0");
+                throw new SearchParseException(context, "size is set to [" + size + "] and is expected to be greater than or equal to 0",
+                        parser.getTokenLocation());
             }
             context.size(size);
         }
diff --git a/src/main/java/org/elasticsearch/search/query/TerminateAfterParseElement.java b/src/main/java/org/elasticsearch/search/query/TerminateAfterParseElement.java
index f4c531e2f27..fc4b64fe9ac 100644
--- a/src/main/java/org/elasticsearch/search/query/TerminateAfterParseElement.java
+++ b/src/main/java/org/elasticsearch/search/query/TerminateAfterParseElement.java
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.search.query;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.search.SearchParseElement;
 import org.elasticsearch.search.internal.SearchContext;
@@ -35,7 +34,7 @@ public class TerminateAfterParseElement implements SearchParseElement {
         if (token == XContentParser.Token.VALUE_NUMBER) {
             int terminateAfterCount = parser.intValue();
             if (terminateAfterCount <= 0) {
-                throw new ElasticsearchIllegalArgumentException("terminateAfter must be > 0");
+                throw new IllegalArgumentException("terminateAfter must be > 0");
             }
             context.terminateAfter(parser.intValue());
         }
diff --git a/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java b/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java
index 047ba3fb0f1..f83a6030e7a 100644
--- a/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java
+++ b/src/main/java/org/elasticsearch/search/rescore/QueryRescorer.java
@@ -20,13 +20,10 @@
 package org.elasticsearch.search.rescore;
 
 import org.apache.lucene.index.Term;
-import org.apache.lucene.search.ComplexExplanation;
 import org.apache.lucene.search.Explanation;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TopDocs;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
-import org.elasticsearch.ElasticsearchIllegalStateException;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
 import org.elasticsearch.index.query.ParsedQuery;
@@ -148,35 +145,35 @@ public final class QueryRescorer implements Rescorer {
         ContextIndexSearcher searcher = context.searcher();
         if (sourceExplanation == null) {
             // this should not happen but just in case
-            return new ComplexExplanation(false, 0.0f, "nothing matched");
+            return Explanation.noMatch("nothing matched");
         }
         // TODO: this isn't right?  I.e., we are incorrectly pretending all first pass hits were rescored?  If the requested docID was
         // beyond the top rescoreContext.window() in the first pass hits, we don't rescore it now?
         Explanation rescoreExplain = searcher.explain(rescore.query(), topLevelDocId);
         float primaryWeight = rescore.queryWeight();
-        ComplexExplanation prim = new ComplexExplanation(sourceExplanation.isMatch(),
-                sourceExplanation.getValue() * primaryWeight,
-                "product of:");
-        prim.addDetail(sourceExplanation);
-        prim.addDetail(new Explanation(primaryWeight, "primaryWeight"));
+
+        Explanation prim;
+        if (sourceExplanation.isMatch()) {
+            prim = Explanation.match(
+                    sourceExplanation.getValue() * primaryWeight,
+                    "product of:", sourceExplanation, Explanation.match(primaryWeight, "primaryWeight"));
+        } else {
+            prim = Explanation.noMatch("First pass did not match", sourceExplanation);
+        }
 
         // NOTE: we don't use Lucene's Rescorer.explain because we want to insert our own description with which ScoreMode was used.  Maybe
         // we should add QueryRescorer.explainCombine to Lucene?
         if (rescoreExplain != null && rescoreExplain.isMatch()) {
             float secondaryWeight = rescore.rescoreQueryWeight();
-            ComplexExplanation sec = new ComplexExplanation(rescoreExplain.isMatch(),
+            Explanation sec = Explanation.match(
                     rescoreExplain.getValue() * secondaryWeight,
-                    "product of:");
-            sec.addDetail(rescoreExplain);
-            sec.addDetail(new Explanation(secondaryWeight, "secondaryWeight"));
+                    "product of:",
+                    rescoreExplain, Explanation.match(secondaryWeight, "secondaryWeight"));
             ScoreMode scoreMode = rescore.scoreMode();
-            ComplexExplanation calcExpl = new ComplexExplanation();
-            calcExpl.setDescription(scoreMode + " of:");
-            calcExpl.addDetail(prim);
-            calcExpl.setMatch(prim.isMatch());
-            calcExpl.addDetail(sec);
-            calcExpl.setValue(scoreMode.combine(prim.getValue(), sec.getValue()));
-            return calcExpl;
+            return Explanation.match(
+                    scoreMode.combine(prim.getValue(), sec.getValue()),
+                    scoreMode + " of:",
+                    prim, sec);
         } else {
             return prim;
         }
@@ -212,10 +209,10 @@ public final class QueryRescorer implements Rescorer {
                 } else if ("multiply".equals(sScoreMode)) {
                     rescoreContext.setScoreMode(ScoreMode.Multiply);
                 } else {
-                    throw new ElasticsearchIllegalArgumentException("[rescore] illegal score_mode [" + sScoreMode + "]");
+                    throw new IllegalArgumentException("[rescore] illegal score_mode [" + sScoreMode + "]");
                 }
             } else {
-                throw new ElasticsearchIllegalArgumentException("rescore doesn't support [" + fieldName + "]");
+                throw new IllegalArgumentException("rescore doesn't support [" + fieldName + "]");
             }
         }
     }
@@ -315,7 +312,7 @@ public final class QueryRescorer implements Rescorer {
         try {
             context.searcher().createNormalizedWeight(((QueryRescoreContext) rescoreContext).query(), false).extractTerms(termsSet);
         } catch (IOException e) {
-            throw new ElasticsearchIllegalStateException("Failed to extract terms", e);
+            throw new IllegalStateException("Failed to extract terms", e);
         }
     }
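
Editor's note: the rescore explanation rework above moves from the mutable ComplexExplanation to Lucene's immutable Explanation.match/noMatch factories, where the value, description, and nested details are supplied up front. A small sketch of the combining style (description strings are made up):

    import org.apache.lucene.search.Explanation;

    final class ExplainDemo {
        // Immutable factories: value, description, then nested detail explanations.
        static Explanation combine(Explanation first, Explanation second) {
            if (!first.isMatch()) {
                return Explanation.noMatch("first pass did not match", first);
            }
            return Explanation.match(
                    first.getValue() + second.getValue(), "sum of:", first, second);
        }
    }
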
diff --git a/src/main/java/org/elasticsearch/search/rescore/RescoreParseElement.java b/src/main/java/org/elasticsearch/search/rescore/RescoreParseElement.java
index f87165e584b..7f9f2725fbc 100644
--- a/src/main/java/org/elasticsearch/search/rescore/RescoreParseElement.java
+++ b/src/main/java/org/elasticsearch/search/rescore/RescoreParseElement.java
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.search.rescore;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.search.SearchParseElement;
@@ -62,12 +61,12 @@ public class RescoreParseElement implements SearchParseElement {
                 if ("window_size".equals(fieldName)) {
                     windowSize = parser.intValue();
                 } else {
-                    throw new ElasticsearchIllegalArgumentException("rescore doesn't support [" + fieldName + "]");
+                    throw new IllegalArgumentException("rescore doesn't support [" + fieldName + "]");
                 }
             }
         }
         if (rescoreContext == null) {
-            throw new ElasticsearchIllegalArgumentException("missing rescore type");
+            throw new IllegalArgumentException("missing rescore type");
         }
         if (windowSize != null) {
             rescoreContext.setWindowSize(windowSize.intValue());
diff --git a/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java b/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java
index 1c8f8bab840..88d2b0aae60 100644
--- a/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java
+++ b/src/main/java/org/elasticsearch/search/rescore/RescorePhase.java
@@ -54,7 +54,7 @@ public class RescorePhase extends AbstractComponent implements SearchPhase {
     }
 
     @Override
-    public void execute(SearchContext context) throws ElasticsearchException {
+    public void execute(SearchContext context) {
         try {
             TopDocs topDocs = context.queryResult().topDocs();
             for (RescoreSearchContext ctx : context.rescore()) {
diff --git a/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java b/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java
index 1e1445ec480..9e3f86dcf72 100644
--- a/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java
+++ b/src/main/java/org/elasticsearch/search/sort/FieldSortBuilder.java
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.search.sort;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.query.FilterBuilder;
@@ -53,7 +52,7 @@ public class FieldSortBuilder extends SortBuilder {
      */
     public FieldSortBuilder(String fieldName) {
         if (fieldName == null) {
-            throw new ElasticsearchIllegalArgumentException("fieldName must not be null");
+            throw new IllegalArgumentException("fieldName must not be null");
         }
         this.fieldName = fieldName;
     }
diff --git a/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java b/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java
index 4d090182813..ae16834f7af 100644
--- a/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java
+++ b/src/main/java/org/elasticsearch/search/sort/GeoDistanceSortParser.java
@@ -21,23 +21,29 @@ package org.elasticsearch.search.sort;
 
 import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.index.NumericDocValues;
-import org.apache.lucene.search.*;
+import org.apache.lucene.search.DocIdSet;
+import org.apache.lucene.search.FieldComparator;
+import org.apache.lucene.search.Filter;
+import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.join.BitDocIdSetFilter;
 import org.apache.lucene.util.BitSet;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.common.geo.GeoDistance;
 import org.elasticsearch.common.geo.GeoDistance.FixedSourceDistance;
 import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.geo.GeoUtils;
+import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.unit.DistanceUnit;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
+import org.elasticsearch.index.fielddata.IndexGeoPointFieldData;
+import org.elasticsearch.index.fielddata.MultiGeoPointValues;
+import org.elasticsearch.index.fielddata.NumericDoubleValues;
+import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.object.ObjectMapper;
 import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport;
-import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
 import org.elasticsearch.search.MultiValueMode;
 import org.elasticsearch.search.internal.SearchContext;
@@ -129,12 +135,12 @@ public class GeoDistanceSortParser implements SortParser {
         }
 
         if (sortMode == MultiValueMode.SUM) {
-            throw new ElasticsearchIllegalArgumentException("sort_mode [sum] isn't supported for sorting by geo distance");
+            throw new IllegalArgumentException("sort_mode [sum] isn't supported for sorting by geo distance");
         }
 
         FieldMapper mapper = context.smartNameFieldMapper(fieldName);
         if (mapper == null) {
-            throw new ElasticsearchIllegalArgumentException("failed to find mapper for [" + fieldName + "] for geo distance based sort");
+            throw new IllegalArgumentException("failed to find mapper for [" + fieldName + "] for geo distance based sort");
         }
         final MultiValueMode finalSortMode = sortMode; // final reference for use in the anonymous class
         final IndexGeoPointFieldData geoIndexFieldData = context.fieldData().getForField(mapper);
@@ -157,12 +163,12 @@ public class GeoDistanceSortParser implements SortParser {
         final Nested nested;
         if (nestedHelper != null && nestedHelper.getPath() != null) {
-            BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(NonNestedDocsFilter.INSTANCE);
+            BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter());
             Filter innerDocumentsFilter;
             if (nestedHelper.filterFound()) {
-                innerDocumentsFilter = context.filterCache().cache(nestedHelper.getInnerFilter(), null, context.queryParserService().autoFilterCachePolicy());
+                innerDocumentsFilter = nestedHelper.getInnerFilter();
             } else {
-                innerDocumentsFilter = context.filterCache().cache(nestedHelper.getNestedObjectMapper().nestedTypeFilter(), null, context.queryParserService().autoFilterCachePolicy());
+                innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter();
             }
             nested = new Nested(rootDocumentsFilter, innerDocumentsFilter);
         } else {
diff --git a/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java b/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java
index 7e3ab76baa7..651e5ab3a8f 100644
--- a/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java
+++ b/src/main/java/org/elasticsearch/search/sort/ScriptSortParser.java
@@ -27,16 +27,21 @@ import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.join.BitDocIdSetFilter;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
+import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.xcontent.XContentParser;
-import org.elasticsearch.index.fielddata.*;
+import org.elasticsearch.index.fielddata.FieldData;
+import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
+import org.elasticsearch.index.fielddata.NumericDoubleValues;
+import org.elasticsearch.index.fielddata.SortedBinaryDocValues;
+import org.elasticsearch.index.fielddata.SortedNumericDoubleValues;
 import org.elasticsearch.index.fielddata.fieldcomparator.BytesRefFieldComparatorSource;
 import org.elasticsearch.index.fielddata.fieldcomparator.DoubleValuesComparatorSource;
 import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport;
-import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
 import org.elasticsearch.script.LeafSearchScript;
-import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptContext;
+import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.script.SearchScript;
 import org.elasticsearch.search.MultiValueMode;
 import org.elasticsearch.search.SearchParseException;
@@ -113,15 +118,15 @@ public class ScriptSortParser implements SortParser {
         }
 
         if (script == null) {
-            throw new SearchParseException(context, "_script sorting requires setting the script to sort by");
+            throw new SearchParseException(context, "_script sorting requires setting the script to sort by", parser.getTokenLocation());
         }
         if (type == null) {
-            throw new SearchParseException(context, "_script sorting requires setting the type of the script");
+            throw new SearchParseException(context, "_script sorting requires setting the type of the script", parser.getTokenLocation());
         }
-        final SearchScript searchScript = context.scriptService().search(context.lookup(), scriptLang, script, scriptType, ScriptContext.Standard.SEARCH, params);
+        final SearchScript searchScript = context.scriptService().search(context.lookup(), new Script(scriptLang, script, scriptType, params), ScriptContext.Standard.SEARCH);
 
         if (STRING_SORT_TYPE.equals(type) && (sortMode == MultiValueMode.SUM || sortMode == MultiValueMode.AVG)) {
-            throw new SearchParseException(context, "type [string] doesn't support mode [" + sortMode + "]");
+            throw new SearchParseException(context, "type [string] doesn't support mode [" + sortMode + "]", parser.getTokenLocation());
         }
 
         if (sortMode == null) {
@@ -131,12 +136,12 @@ public class ScriptSortParser implements SortParser {
         // If nested_path is specified, then wrap the `fieldComparatorSource` in a `NestedFieldComparatorSource`
         final Nested nested;
         if (nestedHelper != null && nestedHelper.getPath() != null) {
-            BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(NonNestedDocsFilter.INSTANCE);
+            BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter());
             Filter innerDocumentsFilter;
             if (nestedHelper.filterFound()) {
-                innerDocumentsFilter = context.filterCache().cache(nestedHelper.getInnerFilter(), null, context.queryParserService().autoFilterCachePolicy());
+                innerDocumentsFilter = nestedHelper.getInnerFilter();
            } else {
-                innerDocumentsFilter = context.filterCache().cache(nestedHelper.getNestedObjectMapper().nestedTypeFilter(), null, context.queryParserService().autoFilterCachePolicy());
+                innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter();
             }
             nested = new Nested(rootDocumentsFilter, innerDocumentsFilter);
         } else {
@@ -191,7 +196,7 @@ public class ScriptSortParser implements SortParser {
                 };
                 break;
             default:
-                throw new SearchParseException(context, "custom script sort type [" + type + "] not supported");
+                throw new SearchParseException(context, "custom script sort type [" + type + "] not supported", parser.getTokenLocation());
         }
 
         return new SortField("_script", fieldComparatorSource, reverse);
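
Editor's note on the script API change above: the scripting service now takes a single Script value object instead of separate lang/source/type/params arguments. The call shape, paraphrased from the changed line (a fragment against ES-internal types, not a standalone program):

    // before (removed line):
    // context.scriptService().search(context.lookup(), scriptLang, script, scriptType, ScriptContext.Standard.SEARCH, params)
    // after (added line):
    final SearchScript searchScript = context.scriptService().search(
            context.lookup(),
            new Script(scriptLang, script, scriptType, params),
            ScriptContext.Standard.SEARCH);
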
diff --git a/src/main/java/org/elasticsearch/search/sort/SortParseElement.java b/src/main/java/org/elasticsearch/search/sort/SortParseElement.java
index a003976342e..3dcaf5a4896 100644
--- a/src/main/java/org/elasticsearch/search/sort/SortParseElement.java
+++ b/src/main/java/org/elasticsearch/search/sort/SortParseElement.java
@@ -21,13 +21,14 @@ package org.elasticsearch.search.sort;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Lists;
+
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.Sort;
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.join.BitDocIdSetFilter;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseField;
+import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.IndexFieldData.XFieldComparatorSource.Nested;
@@ -36,7 +37,6 @@ import org.elasticsearch.index.mapper.core.LongFieldMapper;
 import org.elasticsearch.index.mapper.core.NumberFieldMapper;
 import org.elasticsearch.index.mapper.object.ObjectMapper;
 import org.elasticsearch.index.query.support.NestedInnerQueryParseSupport;
-import org.elasticsearch.index.search.nested.NonNestedDocsFilter;
 import org.elasticsearch.search.MultiValueMode;
 import org.elasticsearch.search.SearchParseElement;
 import org.elasticsearch.search.SearchParseException;
@@ -88,7 +88,7 @@ public class SortParseElement implements SearchParseElement {
                 } else if (token == XContentParser.Token.VALUE_STRING) {
                     addSortField(context, sortFields, parser.text(), false, null, null, null, null);
                 } else {
-                    throw new ElasticsearchIllegalArgumentException("malformed sort format, within the sort array, an object, or an actual string are allowed");
+                    throw new IllegalArgumentException("malformed sort format, within the sort array, an object, or an actual string are allowed");
                 }
             }
         } else if (token == XContentParser.Token.VALUE_STRING) {
@@ -96,7 +96,7 @@ public class SortParseElement implements SearchParseElement {
         } else if (token == XContentParser.Token.START_OBJECT) {
             addCompoundSortField(parser, context, sortFields);
         } else {
-            throw new ElasticsearchIllegalArgumentException("malformed sort format, either start with array, object, or an actual string");
+            throw new IllegalArgumentException("malformed sort format, either start with array, object, or an actual string");
         }
         if (!sortFields.isEmpty()) {
             // optimize if we just sort on score non reversed, we don't really need sorting
@@ -136,7 +136,7 @@ public class SortParseElement implements SearchParseElement {
                     } else if (direction.equals("desc")) {
                         reverse = !SCORE_FIELD_NAME.equals(fieldName);
                     } else {
-                        throw new ElasticsearchIllegalArgumentException("sort direction [" + fieldName + "] not supported");
+                        throw new IllegalArgumentException("sort direction [" + fieldName + "] not supported");
                     }
                     addSortField(context, sortFields, fieldName, reverse, unmappedType, missing, sortMode, nestedFilterParseHelper);
                 } else {
@@ -173,7 +173,7 @@ public class SortParseElement implements SearchParseElement {
                             }
                             nestedFilterParseHelper.setPath(parser.text());
                         } else {
-                            throw new ElasticsearchIllegalArgumentException("sort option [" + innerJsonName + "] not supported");
+                            throw new IllegalArgumentException("sort option [" + innerJsonName + "] not supported");
                        }
                    } else if (token == XContentParser.Token.START_OBJECT) {
                        if ("nested_filter".equals(innerJsonName) || "nestedFilter".equals(innerJsonName)) {
@@ -182,7 +182,7 @@ public class SortParseElement implements SearchParseElement {
                             }
                             nestedFilterParseHelper.filter();
                         } else {
-                            throw new ElasticsearchIllegalArgumentException("sort option [" + innerJsonName + "] not supported");
+                            throw new IllegalArgumentException("sort option [" + innerJsonName + "] not supported");
                         }
                     }
                 }
@@ -212,12 +212,12 @@ public class SortParseElement implements SearchParseElement {
             if (unmappedType != null) {
                 fieldMapper = context.mapperService().unmappedFieldMapper(unmappedType);
             } else {
-                throw new SearchParseException(context, "No mapping found for [" + fieldName + "] in order to sort on");
+                throw new SearchParseException(context, "No mapping found for [" + fieldName + "] in order to sort on", null);
             }
         }
 
         if (!fieldMapper.isSortable()) {
-            throw new SearchParseException(context, "Sorting not supported for field[" + fieldName + "]");
+            throw new SearchParseException(context, "Sorting not supported for field[" + fieldName + "]", null);
         }
 
         // Enable when we also know how to detect fields that do tokenize, but only emit one token
@@ -252,12 +252,12 @@ public class SortParseElement implements SearchParseElement {
         }
         final Nested nested;
         if (nestedHelper != null && nestedHelper.getPath() != null) {
-            BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(NonNestedDocsFilter.INSTANCE);
+            BitDocIdSetFilter rootDocumentsFilter = context.bitsetFilterCache().getBitDocIdSetFilter(Queries.newNonNestedFilter());
             Filter innerDocumentsFilter;
             if (nestedHelper.filterFound()) {
-                innerDocumentsFilter = context.filterCache().cache(nestedHelper.getInnerFilter(), null, context.queryParserService().autoFilterCachePolicy());
+                innerDocumentsFilter = nestedHelper.getInnerFilter();
             } else {
-                innerDocumentsFilter = context.filterCache().cache(nestedHelper.getNestedObjectMapper().nestedTypeFilter(), null, context.queryParserService().autoFilterCachePolicy());
+                innerDocumentsFilter = nestedHelper.getNestedObjectMapper().nestedTypeFilter();
             }
             nested = new Nested(rootDocumentsFilter, innerDocumentsFilter);
         } else {
diff --git a/src/main/java/org/elasticsearch/search/suggest/Suggest.java b/src/main/java/org/elasticsearch/search/suggest/Suggest.java
index 84b324c92c5..8a1f5f12636 100644
--- a/src/main/java/org/elasticsearch/search/suggest/Suggest.java
+++ b/src/main/java/org/elasticsearch/search/suggest/Suggest.java
@@ -20,8 +20,7 @@ package org.elasticsearch.search.suggest;
 
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchIllegalStateException;
-import org.elasticsearch.Version;
+
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
@@ -253,7 +252,7 @@ public class Suggest implements Iterable
             for (Suggestion suggestion : toReduce) {
                 if(suggestion.entries.size() != size) {
-                    throw new ElasticsearchIllegalStateException("Can't merge suggest result, this might be caused by suggest calls " +
+                    throw new IllegalStateException("Can't merge suggest result, this might be caused by suggest calls " +
                             "across multiple indices with different analysis chains. Suggest entries have different sizes actual [" +
                             suggestion.entries.size() + "] expected [" + size +"]");
                 }
@@ -375,7 +374,7 @@ public class Suggest implements Iterable
             leader = toReduce.get(0);
             for (Entry entry : toReduce) {
                 if (!leader.text.equals(entry.text)) {
-                    throw new ElasticsearchIllegalStateException("Can't merge suggest entries, this might be caused by suggest calls " +
+                    throw new IllegalStateException("Can't merge suggest entries, this might be caused by suggest calls " +
                             "across multiple indices with different analysis chains. Suggest entries have different text actual [" +
                             entry.text + "] expected [" + leader.text +"]");
                 }
diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java b/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java
index fa0cfbef139..5399820dd94 100644
--- a/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java
+++ b/src/main/java/org/elasticsearch/search/suggest/SuggestBuilder.java
@@ -22,7 +22,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.client.Requests;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.ToXContent;
@@ -32,8 +31,6 @@ import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.search.suggest.context.ContextMapping.ContextQuery;
 import org.elasticsearch.search.suggest.context.CategoryContextMapping;
 import org.elasticsearch.search.suggest.context.GeolocationContextMapping;
-import org.elasticsearch.search.suggest.phrase.PhraseSuggestionBuilder;
-import org.elasticsearch.search.suggest.term.TermSuggestionBuilder;
 
 /**
  * Defines how to perform suggesting. This builders allows a number of global options to be specified and
@@ -288,7 +285,7 @@ public class SuggestBuilder implements ToXContent {
         @SuppressWarnings("unchecked")
         public T size(int size) {
             if (size <= 0) {
-                throw new ElasticsearchIllegalArgumentException("Size must be positive");
+                throw new IllegalArgumentException("Size must be positive");
             }
             this.size = size;
             return (T)this;
diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java b/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java
index 728eabb065c..edfe04108a0 100644
--- a/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java
+++ b/src/main/java/org/elasticsearch/search/suggest/SuggestParseElement.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.search.suggest;
 
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.mapper.MapperService;
@@ -63,7 +62,7 @@ public final class SuggestParseElement implements SearchParseElement {
                 if ("text".equals(fieldName)) {
                     globalText = parser.utf8Bytes();
                 } else {
-                    throw new ElasticsearchIllegalArgumentException("[suggest] does not support [" + fieldName + "]");
+                    throw new IllegalArgumentException("[suggest] does not support [" + fieldName + "]");
                 }
             } else if (token == XContentParser.Token.START_OBJECT) {
                 String suggestionName = fieldName;
@@ -77,14 +76,14 @@ public final class SuggestParseElement implements SearchParseElement {
                         if ("text".equals(fieldName)) {
                             suggestText = parser.utf8Bytes();
                         } else {
-                            throw new ElasticsearchIllegalArgumentException("[suggest] does not support [" + fieldName + "]");
+                            throw new IllegalArgumentException("[suggest] does not support [" + fieldName + "]");
                         }
                     } else if (token == XContentParser.Token.START_OBJECT) {
                         if (suggestionName == null) {
-                            throw new ElasticsearchIllegalArgumentException("Suggestion must have name");
+                            throw new IllegalArgumentException("Suggestion must have name");
                         }
                         if (suggesters.get(fieldName) == null) {
-                            throw new ElasticsearchIllegalArgumentException("Suggester[" + fieldName + "] not supported");
+                            throw new IllegalArgumentException("Suggester[" + fieldName + "] not supported");
                         }
                         final SuggestContextParser contextParser = suggesters.get(fieldName).getContextParser();
                         suggestionContext = contextParser.parse(parser, mapperService);
diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java b/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java
index 05023ba0f42..209c7b68ce7 100644
--- a/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java
+++ b/src/main/java/org/elasticsearch/search/suggest/SuggestPhase.java
@@ -66,7 +66,7 @@ public class SuggestPhase extends AbstractComponent implements SearchPhase {
     }
 
     @Override
-    public void execute(SearchContext context) throws ElasticsearchException {
+    public void execute(SearchContext context) {
         final SuggestionSearchContext suggest = context.suggest();
         if (suggest == null) {
             return;
diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestUtils.java b/src/main/java/org/elasticsearch/search/suggest/SuggestUtils.java
index 1e355ef3fcc..aaa37ecbdfe 100644
--- a/src/main/java/org/elasticsearch/search/suggest/SuggestUtils.java
+++ b/src/main/java/org/elasticsearch/search/suggest/SuggestUtils.java
@@ -29,7 +29,6 @@ import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.CharsRef;
 import org.apache.lucene.util.CharsRefBuilder;
 import org.apache.lucene.util.automaton.LevenshteinAutomata;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.FastCharArrayReader;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -65,7 +64,7 @@ public final class SuggestUtils {
                 comparator = LUCENE_FREQUENCY;
                 break;
             default:
-                throw new ElasticsearchIllegalArgumentException("Illegal suggest sort: " + suggestion.sort());
+                throw new IllegalArgumentException("Illegal suggest sort: " + suggestion.sort());
         }
         directSpellChecker.setComparator(comparator);
         directSpellChecker.setDistance(suggestion.stringDistance());
@@ -144,7 +143,7 @@ public final class SuggestUtils {
         } else if ("always".equals(suggestMode)) {
             return SuggestMode.SUGGEST_ALWAYS;
         } else {
-            throw new ElasticsearchIllegalArgumentException("Illegal suggest mode " + suggestMode);
+            throw new IllegalArgumentException("Illegal suggest mode " + suggestMode);
         }
     }
@@ -154,7 +153,7 @@ public final class SuggestUtils {
         } else if ("frequency".equals(sortVal)) {
             return Suggest.Suggestion.Sort.FREQUENCY;
         } else {
-            throw new ElasticsearchIllegalArgumentException("Illegal suggest sort " + sortVal);
+            throw new IllegalArgumentException("Illegal suggest sort " + sortVal);
         }
     }
@@ -171,7 +170,7 @@ public final class SuggestUtils {
         } else if ("ngram".equals(distanceVal)) {
             return new NGramDistance();
         } else {
-            throw new ElasticsearchIllegalArgumentException("Illegal distance option " + distanceVal);
+            throw new IllegalArgumentException("Illegal distance option " + distanceVal);
         }
     }
@@ -203,7 +202,7 @@ public final class SuggestUtils {
             } else if (Fields.MAX_EDITS.match(fieldName)) {
                 suggestion.maxEdits(parser.intValue());
                 if (suggestion.maxEdits() < 1 || suggestion.maxEdits() > LevenshteinAutomata.MAXIMUM_SUPPORTED_DISTANCE) {
-                    throw new ElasticsearchIllegalArgumentException("Illegal max_edits value " + suggestion.maxEdits());
+                    throw new IllegalArgumentException("Illegal max_edits value " + suggestion.maxEdits());
                 }
             } else if (Fields.MAX_INSPECTIONS.match(fieldName)) {
                 suggestion.maxInspections(parser.intValue());
@@ -228,7 +227,7 @@ public final class SuggestUtils {
                 String analyzerName = parser.text();
                 Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName);
                 if (analyzer == null) {
-                    throw new ElasticsearchIllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists");
+                    throw new IllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exist");
                 }
                 suggestion.setAnalyzer(analyzer);
             } else if ("field".equals(fieldName)) {
@@ -248,11 +247,11 @@ public final class SuggestUtils {
     public static void verifySuggestion(MapperService mapperService, BytesRef globalText, SuggestionContext suggestion) {
         // Verify options and set defaults
         if (suggestion.getField() == null) {
-            throw new ElasticsearchIllegalArgumentException("The required field option is missing");
+            throw new IllegalArgumentException("The required field option is missing");
         }
         if (suggestion.getText() == null) {
             if (globalText == null) {
-                throw new ElasticsearchIllegalArgumentException("The required text option is missing");
+                throw new IllegalArgumentException("The required text option is missing");
             }
             suggestion.setText(globalText);
         }
diff --git a/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java b/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java
index bf03527f698..2cb36f53914 100644
--- a/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java
+++ b/src/main/java/org/elasticsearch/search/suggest/SuggestionSearchContext.java
@@ -20,7 +20,6 @@ package org.elasticsearch.search.suggest;
 
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 
 import java.util.LinkedHashMap;
 import java.util.Map;
@@ -87,7 +86,7 @@ public class SuggestionSearchContext {
         public void setSize(int size) {
             if (size <= 0) {
-                throw new ElasticsearchIllegalArgumentException("Size must be positive but was: " + size);
+                throw new IllegalArgumentException("Size must be positive but was: " + size);
             }
             this.size = size;
         }
@@ -98,7 +97,7 @@ public class SuggestionSearchContext {
         public void setShardSize(int shardSize) {
             if (shardSize <= 0) {
-                throw new ElasticsearchIllegalArgumentException("ShardSize must be positive but was: " + shardSize);
+                throw new IllegalArgumentException("ShardSize must be positive but was: " + shardSize);
             }
             this.shardSize = shardSize;
         }
diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/Completion090PostingsFormat.java b/src/main/java/org/elasticsearch/search/suggest/completion/Completion090PostingsFormat.java
index 4606d824f72..5ffe9501dc9 100644
--- a/src/main/java/org/elasticsearch/search/suggest/completion/Completion090PostingsFormat.java
+++ b/src/main/java/org/elasticsearch/search/suggest/completion/Completion090PostingsFormat.java
@@ -44,7 +44,6 @@ import org.apache.lucene.util.Accountable;
 import org.apache.lucene.util.Accountables;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.IOUtils;
-import org.elasticsearch.ElasticsearchIllegalStateException;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.index.mapper.core.CompletionFieldMapper;
@@ -176,7 +175,7 @@ public class Completion090PostingsFormat extends PostingsFormat {
             String providerName = input.readString();
             CompletionLookupProvider completionLookupProvider = providers.get(providerName);
             if (completionLookupProvider == null) {
-                throw new ElasticsearchIllegalStateException("no provider with name [" + providerName + "] registered");
+                throw new IllegalStateException("no provider with name [" + providerName + "] registered");
             }
             // TODO: we could clone the ReadState and make it always forward IOContext.MERGE to prevent unecessary heap usage?
             delegateProducer = delegatePostingsFormat.fieldsProducer(state);
diff --git a/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java
index aad44e31da5..05a14291cad 100644
--- a/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java
+++ b/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestParser.java
@@ -18,7 +18,6 @@
  */
 package org.elasticsearch.search.suggest.completion;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.unit.Fuzziness;
@@ -93,10 +92,10 @@ public class CompletionSuggestParser implements SuggestContextParser {
                     BytesReference bytes = builder.bytes();
                     contextParser = parser.contentType().xContent().createParser(bytes);
                 } else {
-                    throw new ElasticsearchIllegalArgumentException("suggester [completion] doesn't support field [" + fieldName + "]");
+                    throw new IllegalArgumentException("suggester [completion] doesn't support field [" + fieldName + "]");
                 }
             } else {
-                throw new ElasticsearchIllegalArgumentException("suggester[completion] doesn't support field [" + fieldName + "]");
+                throw new IllegalArgumentException("suggester[completion] doesn't support field [" + fieldName + "]");
             }
         }
@@ -106,14 +105,14 @@ public class CompletionSuggestParser implements SuggestContextParser {
         if (mapper != null) {
             if (mapper.requiresContext()) {
                 if (contextParser == null) {
-                    throw new ElasticsearchIllegalArgumentException("suggester [completion] requires context to be setup");
+                    throw new IllegalArgumentException("suggester [completion] requires context to be setup");
                 } else {
                     contextParser.nextToken();
                     List contextQueries = ContextQuery.parseQueries(mapper.getContextMapping(), contextParser);
                     suggestion.setContextQuery(contextQueries);
                 }
             } else if (contextParser != null) {
-                throw new ElasticsearchIllegalArgumentException("suggester [completion] doesn't expect any context");
+                throw new IllegalArgumentException("suggester [completion] doesn't expect any context");
             }
         }
         return suggestion;
diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java b/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java
index 03ce464b785..d97f7cf44e5 100644
--- a/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java
+++ b/src/main/java/org/elasticsearch/search/suggest/phrase/DirectCandidateGenerator.java
@@ -26,7 +26,6 @@ import org.apache.lucene.search.spell.SuggestWord;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.CharsRefBuilder;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.search.suggest.SuggestUtils;
 
 import java.io.IOException;
@@ -58,7 +57,7 @@ public final class DirectCandidateGenerator extends CandidateGenerator {
     public DirectCandidateGenerator(DirectSpellChecker spellchecker, String field, SuggestMode suggestMode, IndexReader reader, double nonErrorLikelihood, int numCandidates, Analyzer preFilter, Analyzer postFilter, Terms terms) throws IOException {
         if (terms == null) {
-            throw new ElasticsearchIllegalArgumentException("generator field [" + field + "] doesn't exist");
+            throw new IllegalArgumentException("generator field [" + field + "] doesn't exist");
         }
         this.spellchecker = spellchecker;
         this.field = field;
diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java
index 22a797db93c..7905d538848 100644
--- a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java
+++ b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestParser.java
@@ -22,7 +22,6 @@ import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.common.xcontent.XContentParser.Token;
@@ -30,6 +29,7 @@ import org.elasticsearch.index.analysis.ShingleTokenFilterFactory;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.script.CompiledScript;
+import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptContext;
 import org.elasticsearch.script.ScriptService.ScriptType;
 import org.elasticsearch.script.mustache.MustacheScriptEngineService;
@@ -62,24 +62,24 @@ public final class PhraseSuggestParser implements SuggestContextParser {
                 if ("real_word_error_likelihood".equals(fieldName) || "realWorldErrorLikelihood".equals(fieldName)) {
                     suggestion.setRealWordErrorLikelihood(parser.floatValue());
                     if (suggestion.realworldErrorLikelyhood() <= 0.0) {
-                        throw new ElasticsearchIllegalArgumentException("real_word_error_likelihood must be > 0.0");
+                        throw new IllegalArgumentException("real_word_error_likelihood must be > 0.0");
                     }
                 } else if ("confidence".equals(fieldName)) {
                     suggestion.setConfidence(parser.floatValue());
                     if (suggestion.confidence() < 0.0) {
-                        throw new ElasticsearchIllegalArgumentException("confidence must be >= 0.0");
+                        throw new IllegalArgumentException("confidence must be >= 0.0");
                     }
                 } else if ("separator".equals(fieldName)) {
                     suggestion.setSeparator(new BytesRef(parser.text()));
                 } else if ("max_errors".equals(fieldName) || "maxErrors".equals(fieldName)) {
                     suggestion.setMaxErrors(parser.floatValue());
                     if (suggestion.maxErrors() <= 0.0) {
-                        throw new ElasticsearchIllegalArgumentException("max_error must be > 0.0");
+                        throw new IllegalArgumentException("max_errors must be > 0.0");
                     }
                 } else if ("gram_size".equals(fieldName) || "gramSize".equals(fieldName)) {
                     suggestion.setGramSize(parser.intValue());
                     if (suggestion.gramSize() < 1) {
-                        throw new ElasticsearchIllegalArgumentException("gram_size must be >= 1");
+                        throw new IllegalArgumentException("gram_size must be >= 1");
                     }
                     gramSizeSet = true;
                 } else if
("force_unigrams".equals(fieldName) || "forceUnigrams".equals(fieldName)) { @@ -87,11 +87,11 @@ public final class PhraseSuggestParser implements SuggestContextParser { } else if ("token_limit".equals(fieldName) || "tokenLimit".equals(fieldName)) { int tokenLimit = parser.intValue(); if (tokenLimit <= 0) { - throw new ElasticsearchIllegalArgumentException("token_limit must be >= 1"); + throw new IllegalArgumentException("token_limit must be >= 1"); } suggestion.setTokenLimit(tokenLimit); } else { - throw new ElasticsearchIllegalArgumentException("suggester[phrase] doesn't support field [" + fieldName + "]"); + throw new IllegalArgumentException("suggester[phrase] doesn't support field [" + fieldName + "]"); } } } else if (token == Token.START_ARRAY) { @@ -111,7 +111,7 @@ public final class PhraseSuggestParser implements SuggestContextParser { suggestion.addGenerator(generator); } } else { - throw new ElasticsearchIllegalArgumentException("suggester[phrase] doesn't support array field [" + fieldName + "]"); + throw new IllegalArgumentException("suggester[phrase] doesn't support array field [" + fieldName + "]"); } } else if (token == Token.START_OBJECT) { if ("smoothing".equals(fieldName)) { @@ -126,7 +126,7 @@ public final class PhraseSuggestParser implements SuggestContextParser { } else if ("post_tag".equals(fieldName) || "postTag".equals(fieldName)) { suggestion.setPostTag(parser.utf8Bytes()); } else { - throw new ElasticsearchIllegalArgumentException( + throw new IllegalArgumentException( "suggester[phrase][highlight] doesn't support field [" + fieldName + "]"); } } @@ -145,15 +145,15 @@ public final class PhraseSuggestParser implements SuggestContextParser { templateNameOrTemplateContent = parser.text(); } if (templateNameOrTemplateContent == null) { - throw new ElasticsearchIllegalArgumentException("suggester[phrase][collate] no query/filter found in collate object"); + throw new IllegalArgumentException("suggester[phrase][collate] no query/filter found in collate object"); } if (suggestion.getCollateFilterScript() != null) { - throw new ElasticsearchIllegalArgumentException("suggester[phrase][collate] filter already set, doesn't support additional [" + fieldName + "]"); + throw new IllegalArgumentException("suggester[phrase][collate] filter already set, doesn't support additional [" + fieldName + "]"); } if (suggestion.getCollateQueryScript() != null) { - throw new ElasticsearchIllegalArgumentException("suggester[phrase][collate] query already set, doesn't support additional [" + fieldName + "]"); + throw new IllegalArgumentException("suggester[phrase][collate] query already set, doesn't support additional [" + fieldName + "]"); } - CompiledScript compiledScript = suggester.scriptService().compile(MustacheScriptEngineService.NAME, templateNameOrTemplateContent, ScriptType.INLINE, ScriptContext.Standard.SEARCH); + CompiledScript compiledScript = suggester.scriptService().compile(new Script(MustacheScriptEngineService.NAME, templateNameOrTemplateContent, ScriptType.INLINE, null), ScriptContext.Standard.SEARCH); if ("query".equals(fieldName)) { suggestion.setCollateQueryScript(compiledScript); } else { @@ -167,28 +167,28 @@ public final class PhraseSuggestParser implements SuggestContextParser { if (parser.isBooleanValue()) { suggestion.setCollatePrune(parser.booleanValue()); } else { - throw new ElasticsearchIllegalArgumentException("suggester[phrase][collate] prune must be either 'true' or 'false'"); + throw new IllegalArgumentException("suggester[phrase][collate] prune must be 
either 'true' or 'false'"); } } else { - throw new ElasticsearchIllegalArgumentException( + throw new IllegalArgumentException( "suggester[phrase][collate] doesn't support field [" + fieldName + "]"); } } } else { - throw new ElasticsearchIllegalArgumentException("suggester[phrase] doesn't support array field [" + fieldName + "]"); + throw new IllegalArgumentException("suggester[phrase] doesn't support array field [" + fieldName + "]"); } } else { - throw new ElasticsearchIllegalArgumentException("suggester[phrase] doesn't support field [" + fieldName + "]"); + throw new IllegalArgumentException("suggester[phrase] doesn't support field [" + fieldName + "]"); } } if (suggestion.getField() == null) { - throw new ElasticsearchIllegalArgumentException("The required field option is missing"); + throw new IllegalArgumentException("The required field option is missing"); } FieldMapper fieldMapper = mapperService.smartNameFieldMapper(suggestion.getField()); if (fieldMapper == null) { - throw new ElasticsearchIllegalArgumentException("No mapping found for field [" + suggestion.getField() + "]"); + throw new IllegalArgumentException("No mapping found for field [" + suggestion.getField() + "]"); } else if (suggestion.getAnalyzer() == null) { // no analyzer name passed in, so try the field's analyzer, or the default analyzer if (fieldMapper.searchAnalyzer() == null) { @@ -209,13 +209,13 @@ public final class PhraseSuggestParser implements SuggestContextParser { if (shingleFilterFactory != null) { suggestion.setGramSize(shingleFilterFactory.getMaxShingleSize()); if (suggestion.getAnalyzer() == null && shingleFilterFactory.getMinShingleSize() > 1 && !shingleFilterFactory.getOutputUnigrams()) { - throw new ElasticsearchIllegalArgumentException("The default analyzer for field: [" + suggestion.getField() + "] doesn't emit unigrams. If this is intentional try to set the analyzer explicitly"); + throw new IllegalArgumentException("The default analyzer for field: [" + suggestion.getField() + "] doesn't emit unigrams. If this is intentional try to set the analyzer explicitly"); } } } if (suggestion.generators().isEmpty()) { if (shingleFilterFactory != null && shingleFilterFactory.getMinShingleSize() > 1 && !shingleFilterFactory.getOutputUnigrams() && suggestion.getRequireUnigram()) { - throw new ElasticsearchIllegalArgumentException("The default candidate generator for phrase suggest can't operate on field: [" + suggestion.getField() + "] since it doesn't emit unigrams. If this is intentional try to set the candidate generator field explicitly"); + throw new IllegalArgumentException("The default candidate generator for phrase suggest can't operate on field: [" + suggestion.getField() + "] since it doesn't emit unigrams. 
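The collate hunk just above also changes how the Mustache template reaches the script service: the loose (lang, source, type) arguments to compile(...) are folded into a single Script value object. The sketch below models only that shape with stand-in types; the real org.elasticsearch.script classes and their exact constructor signature are whatever the hunk itself shows:

```java
// Hypothetical stand-ins illustrating the "wrap compile arguments in a value
// object" shape of the collate change; not the real Elasticsearch classes.
import java.util.Map;

public final class ScriptWrapperSketch {
    enum ScriptType { INLINE, FILE, INDEXED }

    static final class Script {
        final String lang, source; final ScriptType type; final Map<String, Object> params;
        Script(String lang, String source, ScriptType type, Map<String, Object> params) {
            this.lang = lang; this.source = source; this.type = type; this.params = params;
        }
    }

    static final class ScriptService {
        String compile(Script script, String context) {
            // stand-in for real compilation; just demonstrates the single-argument call
            return "compiled[" + script.lang + ":" + script.source + "@" + context + "]";
        }
    }

    public static void main(String[] args) {
        ScriptService service = new ScriptService();
        // old style (removed): service.compile(lang, source, type, context)
        System.out.println(service.compile(
                new Script("mustache", "{\"match\": {\"title\": \"{{suggestion}}\"}}",
                        ScriptType.INLINE, null),
                "search"));
    }
}
```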
If this is intentional try to set the candidate generator field explicitly"); } // use a default generator on the same field DirectCandidateGenerator generator = new DirectCandidateGenerator(); @@ -245,20 +245,20 @@ public final class PhraseSuggestParser implements SuggestContextParser { if ("trigram_lambda".equals(fieldName) || "trigramLambda".equals(fieldName)) { lambdas[0] = parser.doubleValue(); if (lambdas[0] < 0) { - throw new ElasticsearchIllegalArgumentException("trigram_lambda must be positive"); + throw new IllegalArgumentException("trigram_lambda must be positive"); } } else if ("bigram_lambda".equals(fieldName) || "bigramLambda".equals(fieldName)) { lambdas[1] = parser.doubleValue(); if (lambdas[1] < 0) { - throw new ElasticsearchIllegalArgumentException("bigram_lambda must be positive"); + throw new IllegalArgumentException("bigram_lambda must be positive"); } } else if ("unigram_lambda".equals(fieldName) || "unigramLambda".equals(fieldName)) { lambdas[2] = parser.doubleValue(); if (lambdas[2] < 0) { - throw new ElasticsearchIllegalArgumentException("unigram_lambda must be positive"); + throw new IllegalArgumentException("unigram_lambda must be positive"); } } else { - throw new ElasticsearchIllegalArgumentException( + throw new IllegalArgumentException( "suggester[phrase][smoothing][linear] doesn't support field [" + fieldName + "]"); } } @@ -268,7 +268,7 @@ public final class PhraseSuggestParser implements SuggestContextParser { sum += lambdas[i]; } if (Math.abs(sum - 1.0) > 0.001) { - throw new ElasticsearchIllegalArgumentException("linear smoothing lambdas must sum to 1"); + throw new IllegalArgumentException("linear smoothing lambdas must sum to 1"); } suggestion.setModel(new WordScorer.WordScorerFactory() { @Override @@ -320,7 +320,7 @@ public final class PhraseSuggestParser implements SuggestContextParser { }); } else { - throw new ElasticsearchIllegalArgumentException("suggester[phrase] doesn't support object field [" + fieldName + "]"); + throw new IllegalArgumentException("suggester[phrase] doesn't support object field [" + fieldName + "]"); } } } @@ -328,14 +328,14 @@ public final class PhraseSuggestParser implements SuggestContextParser { private void ensureNoSmoothing(PhraseSuggestionContext suggestion) { if (suggestion.model() != null) { - throw new ElasticsearchIllegalArgumentException("only one smoothing model supported"); + throw new IllegalArgumentException("only one smoothing model supported"); } } private void verifyGenerator(PhraseSuggestionContext.DirectCandidateGenerator suggestion) { // Verify options and set defaults if (suggestion.field() == null) { - throw new ElasticsearchIllegalArgumentException("The required field option is missing"); + throw new IllegalArgumentException("The required field option is missing"); } } @@ -345,7 +345,7 @@ public final class PhraseSuggestParser implements SuggestContextParser { if ("field".equals(fieldName)) { generator.setField(parser.text()); if (mapperService.smartNameFieldMapper(generator.field()) == null) { - throw new ElasticsearchIllegalArgumentException("No mapping found for field [" + generator.field() + "]"); + throw new IllegalArgumentException("No mapping found for field [" + generator.field() + "]"); } } else if ("size".equals(fieldName)) { generator.size(parser.intValue()); @@ -353,18 +353,18 @@ public final class PhraseSuggestParser implements SuggestContextParser { String analyzerName = parser.text(); Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName); if (analyzer == null) { - 
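The linear smoothing model parsed above enforces two invariants before a WordScorer is built: each lambda must be non-negative, and the three must sum to 1 within an epsilon of 0.001. A self-contained rendering of that validation:

```java
// Mirrors the trigram/bigram/unigram lambda checks in the linear-smoothing
// hunk: negative weights and weights that do not sum to ~1 are rejected.
public final class LinearSmoothingCheck {
    static void validateLambdas(double trigram, double bigram, double unigram) {
        double[] lambdas = {trigram, bigram, unigram};
        String[] names = {"trigram_lambda", "bigram_lambda", "unigram_lambda"};
        double sum = 0.0d;
        for (int i = 0; i < lambdas.length; i++) {
            if (lambdas[i] < 0) {
                throw new IllegalArgumentException(names[i] + " must be positive");
            }
            sum += lambdas[i];
        }
        if (Math.abs(sum - 1.0) > 0.001) {
            throw new IllegalArgumentException("linear smoothing lambdas must sum to 1");
        }
    }

    public static void main(String[] args) {
        validateLambdas(0.65, 0.25, 0.10); // passes: sums to 1.0
        try {
            validateLambdas(0.5, 0.5, 0.5); // sums to 1.5
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage()); // linear smoothing lambdas must sum to 1
        }
    }
}
```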
throw new ElasticsearchIllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists"); + throw new IllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists"); } generator.preFilter(analyzer); } else if ("post_filter".equals(fieldName) || "postFilter".equals(fieldName)) { String analyzerName = parser.text(); Analyzer analyzer = mapperService.analysisService().analyzer(analyzerName); if (analyzer == null) { - throw new ElasticsearchIllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists"); + throw new IllegalArgumentException("Analyzer [" + analyzerName + "] doesn't exists"); } generator.postFilter(analyzer); } else { - throw new ElasticsearchIllegalArgumentException("CandidateGenerator doesn't support [" + fieldName + "]"); + throw new IllegalArgumentException("CandidateGenerator doesn't support [" + fieldName + "]"); } } } diff --git a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java index afa5595688a..ef320eb60e5 100644 --- a/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java +++ b/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.search.suggest.phrase; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.search.suggest.SuggestBuilder.SuggestionBuilder; @@ -59,7 +58,7 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder= 1"); + throw new IllegalArgumentException("gramSize must be >= 1"); } this.gramSize = gramSize; return this; @@ -164,7 +163,7 @@ public final class PhraseSuggestionBuilder extends SuggestionBuilder implements IndexMetaData.Custom { public static final String TYPE = "warmers"; - public static final Factory FACTORY = new Factory(); + public static final IndexWarmersMetaData PROTO = new IndexWarmersMetaData(); + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + IndexWarmersMetaData that = (IndexWarmersMetaData) o; + + return entries.equals(that.entries); + + } + + @Override + public int hashCode() { + return entries.hashCode(); + } public static class Entry { private final String name; @@ -74,6 +93,29 @@ public class IndexWarmersMetaData implements IndexMetaData.Custom { public Boolean queryCache() { return this.queryCache; } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + Entry entry = (Entry) o; + + if (!name.equals(entry.name)) return false; + if (!Arrays.equals(types, entry.types)) return false; + if (!source.equals(entry.source)) return false; + return !(queryCache != null ? !queryCache.equals(entry.queryCache) : entry.queryCache != null); + + } + + @Override + public int hashCode() { + int result = name.hashCode(); + result = 31 * result + Arrays.hashCode(types); + result = 31 * result + source.hashCode(); + result = 31 * result + (queryCache != null ? 
queryCache.hashCode() : 0); + return result; + } } private final ImmutableList entries; @@ -92,149 +134,143 @@ public class IndexWarmersMetaData implements IndexMetaData.Custom { return TYPE; } - public static class Factory implements IndexMetaData.Custom.Factory { - - @Override - public String type() { - return TYPE; + @Override + public IndexWarmersMetaData readFrom(StreamInput in) throws IOException { + Entry[] entries = new Entry[in.readVInt()]; + for (int i = 0; i < entries.length; i++) { + String name = in.readString(); + String[] types = in.readStringArray(); + BytesReference source = null; + if (in.readBoolean()) { + source = in.readBytesReference(); + } + Boolean queryCache; + queryCache = in.readOptionalBoolean(); + entries[i] = new Entry(name, types, queryCache, source); } + return new IndexWarmersMetaData(entries); + } - @Override - public IndexWarmersMetaData readFrom(StreamInput in) throws IOException { - Entry[] entries = new Entry[in.readVInt()]; - for (int i = 0; i < entries.length; i++) { - String name = in.readString(); - String[] types = in.readStringArray(); - BytesReference source = null; - if (in.readBoolean()) { - source = in.readBytesReference(); - } - Boolean queryCache = null; - queryCache = in.readOptionalBoolean(); - entries[i] = new Entry(name, types, queryCache, source); - } - return new IndexWarmersMetaData(entries); - } - - @Override - public void writeTo(IndexWarmersMetaData warmers, StreamOutput out) throws IOException { - out.writeVInt(warmers.entries().size()); - for (Entry entry : warmers.entries()) { - out.writeString(entry.name()); - out.writeStringArray(entry.types()); - if (entry.source() == null) { - out.writeBoolean(false); - } else { - out.writeBoolean(true); - out.writeBytesReference(entry.source()); - } - out.writeOptionalBoolean(entry.queryCache()); - } - } - - @Override - public IndexWarmersMetaData fromMap(Map map) throws IOException { - // if it starts with the type, remove it - if (map.size() == 1 && map.containsKey(TYPE)) { - map = (Map) map.values().iterator().next(); - } - XContentBuilder builder = XContentFactory.smileBuilder().map(map); - try (XContentParser parser = XContentFactory.xContent(XContentType.SMILE).createParser(builder.bytes())) { - // move to START_OBJECT - parser.nextToken(); - return fromXContent(parser); - } - } - - @Override - public IndexWarmersMetaData fromXContent(XContentParser parser) throws IOException { - // we get here after we are at warmers token - String currentFieldName = null; - XContentParser.Token token; - List entries = new ArrayList<>(); - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_OBJECT) { - String name = currentFieldName; - List types = new ArrayList<>(2); - BytesReference source = null; - Boolean queryCache = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - currentFieldName = parser.currentName(); - } else if (token == XContentParser.Token.START_ARRAY) { - if ("types".equals(currentFieldName)) { - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - types.add(parser.text()); - } - } - } else if (token == XContentParser.Token.START_OBJECT) { - if ("source".equals(currentFieldName)) { - XContentBuilder builder = XContentFactory.jsonBuilder().map(parser.mapOrdered()); - source = builder.bytes(); - } - } else if (token == 
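One detail worth calling out in the equals/hashCode pair added to IndexWarmersMetaData.Entry above: the types field is a String[], and arrays inherit identity-based equals/hashCode from Object, so they must go through Arrays.equals and Arrays.hashCode. A trimmed, runnable version of the same structure:

```java
// Reduced version of Entry's new equals/hashCode; the queryCache/source
// fields are dropped for brevity, the array handling is the point.
import java.util.Arrays;

public final class EntrySketch {
    private final String name;
    private final String[] types;

    EntrySketch(String name, String[] types) {
        this.name = name;
        this.types = types;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        EntrySketch entry = (EntrySketch) o;
        return name.equals(entry.name) && Arrays.equals(types, entry.types);
    }

    @Override
    public int hashCode() {
        int result = name.hashCode();
        result = 31 * result + Arrays.hashCode(types);
        return result;
    }

    public static void main(String[] args) {
        EntrySketch a = new EntrySketch("w1", new String[]{"doc"});
        EntrySketch b = new EntrySketch("w1", new String[]{"doc"});
        System.out.println(a.equals(b)); // true, despite distinct array instances
    }
}
```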
XContentParser.Token.VALUE_EMBEDDED_OBJECT) { - if ("source".equals(currentFieldName)) { - source = new BytesArray(parser.binaryValue()); - } - } else if (token.isValue()) { - if ("queryCache".equals(currentFieldName) || "query_cache".equals(currentFieldName)) { - queryCache = parser.booleanValue(); - } - } - } - entries.add(new Entry(name, types.size() == 0 ? Strings.EMPTY_ARRAY : types.toArray(new String[types.size()]), queryCache, source)); - } - } - return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); - } - - @Override - public void toXContent(IndexWarmersMetaData warmers, XContentBuilder builder, ToXContent.Params params) throws IOException { - //No need, IndexMetaData already writes it - //builder.startObject(TYPE, XContentBuilder.FieldCaseConversion.NONE); - for (Entry entry : warmers.entries()) { - toXContent(entry, builder, params); - } - //No need, IndexMetaData already writes it - //builder.endObject(); - } - - public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { - boolean binary = params.paramAsBoolean("binary", false); - builder.startObject(entry.name(), XContentBuilder.FieldCaseConversion.NONE); - builder.field("types", entry.types()); - if (entry.queryCache() != null) { - builder.field("queryCache", entry.queryCache()); - } - builder.field("source"); - if (binary) { - builder.value(entry.source()); + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVInt(entries().size()); + for (Entry entry : entries()) { + out.writeString(entry.name()); + out.writeStringArray(entry.types()); + if (entry.source() == null) { + out.writeBoolean(false); } else { - Map mapping = XContentFactory.xContent(entry.source()).createParser(entry.source()).mapOrderedAndClose(); - builder.map(mapping); + out.writeBoolean(true); + out.writeBytesReference(entry.source()); } - builder.endObject(); - } - - @Override - public IndexWarmersMetaData merge(IndexWarmersMetaData first, IndexWarmersMetaData second) { - List entries = Lists.newArrayList(); - entries.addAll(first.entries()); - for (Entry secondEntry : second.entries()) { - boolean found = false; - for (Entry firstEntry : first.entries()) { - if (firstEntry.name().equals(secondEntry.name())) { - found = true; - break; - } - } - if (!found) { - entries.add(secondEntry); - } - } - return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); + out.writeOptionalBoolean(entry.queryCache()); } } + + @Override + public IndexWarmersMetaData fromMap(Map map) throws IOException { + // if it starts with the type, remove it + if (map.size() == 1 && map.containsKey(TYPE)) { + map = (Map) map.values().iterator().next(); + } + XContentBuilder builder = XContentFactory.smileBuilder().map(map); + try (XContentParser parser = XContentFactory.xContent(XContentType.SMILE).createParser(builder.bytes())) { + // move to START_OBJECT + parser.nextToken(); + return fromXContent(parser); + } + } + + @Override + public IndexWarmersMetaData fromXContent(XContentParser parser) throws IOException { + // we get here after we are at warmers token + String currentFieldName = null; + XContentParser.Token token; + List entries = new ArrayList<>(); + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_OBJECT) { + String name = currentFieldName; + List types = new ArrayList<>(2); + BytesReference source 
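The writeTo/readFrom pair moved onto IndexWarmersMetaData itself uses a presence-flag encoding for the nullable source field: a boolean first, then the payload only when present (queryCache gets the same treatment via writeOptionalBoolean/readOptionalBoolean). A plain-JDK sketch of that wire pattern, with DataOutputStream/DataInputStream standing in for Elasticsearch's StreamOutput/StreamInput:

```java
// Presence-flag encoding for a nullable field: writer and reader must agree
// on the flag-then-payload layout, exactly as writeTo/readFrom do above.
import java.io.*;

public final class OptionalFieldWire {
    static void writeOptionalString(DataOutputStream out, String value) throws IOException {
        if (value == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            out.writeUTF(value);
        }
    }

    static String readOptionalString(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        writeOptionalString(out, "warmer-source");
        writeOptionalString(out, null);
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readOptionalString(in)); // warmer-source
        System.out.println(readOptionalString(in)); // null
    }
}
```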
= null; + Boolean queryCache = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + currentFieldName = parser.currentName(); + } else if (token == XContentParser.Token.START_ARRAY) { + if ("types".equals(currentFieldName)) { + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + types.add(parser.text()); + } + } + } else if (token == XContentParser.Token.START_OBJECT) { + if ("source".equals(currentFieldName)) { + XContentBuilder builder = XContentFactory.jsonBuilder().map(parser.mapOrdered()); + source = builder.bytes(); + } + } else if (token == XContentParser.Token.VALUE_EMBEDDED_OBJECT) { + if ("source".equals(currentFieldName)) { + source = new BytesArray(parser.binaryValue()); + } + } else if (token.isValue()) { + if ("queryCache".equals(currentFieldName) || "query_cache".equals(currentFieldName)) { + queryCache = parser.booleanValue(); + } + } + } + entries.add(new Entry(name, types.size() == 0 ? Strings.EMPTY_ARRAY : types.toArray(new String[types.size()]), queryCache, source)); + } + } + return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + //No need, IndexMetaData already writes it + //builder.startObject(TYPE, XContentBuilder.FieldCaseConversion.NONE); + for (Entry entry : entries()) { + toXContent(entry, builder, params); + } + //No need, IndexMetaData already writes it + //builder.endObject(); + return builder; + } + + public static void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException { + boolean binary = params.paramAsBoolean("binary", false); + builder.startObject(entry.name(), XContentBuilder.FieldCaseConversion.NONE); + builder.field("types", entry.types()); + if (entry.queryCache() != null) { + builder.field("queryCache", entry.queryCache()); + } + builder.field("source"); + if (binary) { + builder.value(entry.source()); + } else { + Map mapping = XContentFactory.xContent(entry.source()).createParser(entry.source()).mapOrderedAndClose(); + builder.map(mapping); + } + builder.endObject(); + } + + @Override + public IndexMetaData.Custom mergeWith(IndexMetaData.Custom other) { + IndexWarmersMetaData second = (IndexWarmersMetaData) other; + List entries = Lists.newArrayList(); + entries.addAll(entries()); + for (Entry secondEntry : second.entries()) { + boolean found = false; + for (Entry firstEntry : entries()) { + if (firstEntry.name().equals(secondEntry.name())) { + found = true; + break; + } + } + if (!found) { + entries.add(secondEntry); + } + } + return new IndexWarmersMetaData(entries.toArray(new Entry[entries.size()])); + } } diff --git a/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/src/main/java/org/elasticsearch/snapshots/RestoreService.java index d0f3a35bcfa..b544922e0f8 100644 --- a/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -128,7 +128,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis this.allocationService = allocationService; this.createIndexService = createIndexService; this.dynamicSettings = dynamicSettings; - transportService.registerHandler(UPDATE_RESTORE_ACTION_NAME, new UpdateRestoreStateRequestHandler()); + transportService.registerRequestHandler(UPDATE_RESTORE_ACTION_NAME, UpdateIndexShardRestoreStatusRequest.class, 
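mergeWith above merges two warmer lists keyed by entry name, with the receiver's entries winning on collision. A plain-JDK rendering of that logic, with strings standing in for Entry objects keyed by name():

```java
// Name-keyed merge: keep everything from the first list, append from the
// second only what the first does not already contain.
import java.util.*;

public final class MergeSketch {
    static List<String> merge(List<String> first, List<String> second) {
        List<String> merged = new ArrayList<>(first);
        for (String candidate : second) {
            if (!first.contains(candidate)) { // first list wins on collision
                merged.add(candidate);
            }
        }
        return merged;
    }

    public static void main(String[] args) {
        System.out.println(merge(Arrays.asList("w1", "w2"), Arrays.asList("w2", "w3")));
        // [w1, w2, w3] — existing w1/w2 are kept, only w3 is appended
    }
}
```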
ThreadPool.Names.SAME, new UpdateRestoreStateRequestHandler()); clusterService.add(this); } @@ -190,6 +190,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis // Index doesn't exist - create it and start recovery // Make sure that the index we are about to create has a validate name createIndexService.validateIndexName(renamedIndex, currentState); + createIndexService.validateIndexSettings(renamedIndex, snapshotIndexMetaData.settings()); IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN).index(renamedIndex); indexMdBuilder.settings(ImmutableSettings.settingsBuilder().put(snapshotIndexMetaData.settings()).put(IndexMetaData.SETTING_UUID, Strings.randomBase64UUID())); if (!request.includeAliases() && !snapshotIndexMetaData.aliases().isEmpty()) { @@ -937,7 +938,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis /** * Internal class that is used to send notifications about finished shard restore operations to master node */ - private static class UpdateIndexShardRestoreStatusRequest extends TransportRequest { + static class UpdateIndexShardRestoreStatusRequest extends TransportRequest { private SnapshotId snapshotId; private ShardId shardId; private ShardRestoreStatus status; @@ -984,22 +985,11 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis /** * Internal class that is used to send notifications about finished shard restore operations to master node */ - private class UpdateRestoreStateRequestHandler extends BaseTransportRequestHandler { - - @Override - public UpdateIndexShardRestoreStatusRequest newInstance() { - return new UpdateIndexShardRestoreStatusRequest(); - } - + class UpdateRestoreStateRequestHandler implements TransportRequestHandler { @Override public void messageReceived(UpdateIndexShardRestoreStatusRequest request, final TransportChannel channel) throws Exception { updateRestoreStateOnMaster(request); channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } } diff --git a/src/main/java/org/elasticsearch/snapshots/SnapshotState.java b/src/main/java/org/elasticsearch/snapshots/SnapshotState.java index 51ca694b84f..b893a372d13 100644 --- a/src/main/java/org/elasticsearch/snapshots/SnapshotState.java +++ b/src/main/java/org/elasticsearch/snapshots/SnapshotState.java @@ -19,7 +19,6 @@ package org.elasticsearch.snapshots; -import org.elasticsearch.ElasticsearchIllegalArgumentException; /** * Represents the state that a snapshot can be in @@ -99,7 +98,7 @@ public enum SnapshotState { case 3: return PARTIAL; default: - throw new ElasticsearchIllegalArgumentException("No snapshot state for value [" + value + "]"); + throw new IllegalArgumentException("No snapshot state for value [" + value + "]"); } } } diff --git a/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index ab7ec1e1755..bc00f9c7f18 100644 --- a/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -118,7 +118,7 @@ public class SnapshotsService extends AbstractLifecycleComponent { - - @Override - public UpdateIndexShardSnapshotStatusRequest newInstance() { - return new UpdateIndexShardSnapshotStatusRequest(); - } - + class UpdateSnapshotStateRequestHandler implements TransportRequestHandler { @Override public void 
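The handler rewrite visible here (UpdateRestoreStateRequestHandler and, further down, UpdateSnapshotStateRequestHandler) shrinks TransportRequestHandler to a single method: newInstance(), executor(), and isForceExecution() move to registration time via registerRequestHandler(action, requestClass, executor, handler). A minimal model of the slimmed-down contract, with stand-in types for the org.elasticsearch.transport interfaces:

```java
// Hypothetical single-method handler contract; after this change a handler
// only implements messageReceived, everything else is supplied at registration.
public final class HandlerContractSketch {
    interface TransportChannel { void sendResponse(Object response) throws Exception; }

    interface TransportRequestHandler<T> {
        void messageReceived(T request, TransportChannel channel) throws Exception;
    }

    static final class PingRequest { }

    public static void main(String[] args) throws Exception {
        // old style (removed) also forced every handler to implement
        // newInstance(), executor(), and isForceExecution()
        TransportRequestHandler<PingRequest> handler =
                (request, channel) -> channel.sendResponse("pong");
        handler.messageReceived(new PingRequest(), response -> System.out.println(response));
    }
}
```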
messageReceived(UpdateIndexShardSnapshotStatusRequest request, final TransportChannel channel) throws Exception { innerUpdateSnapshotState(request); channel.sendResponse(TransportResponse.Empty.INSTANCE); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } } - - } diff --git a/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/src/main/java/org/elasticsearch/threadpool/ThreadPool.java index 0a0236a8ca8..5eee9852968 100644 --- a/src/main/java/org/elasticsearch/threadpool/ThreadPool.java +++ b/src/main/java/org/elasticsearch/threadpool/ThreadPool.java @@ -24,7 +24,6 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.Maps; import com.google.common.util.concurrent.MoreExecutors; import org.apache.lucene.util.Counter; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.inject.Inject; @@ -142,7 +141,7 @@ public class ThreadPool extends AbstractComponent { executors.put(Names.SAME, new ExecutorHolder(MoreExecutors.directExecutor(), new Info(Names.SAME, "same"))); if (!executors.get(Names.GENERIC).info.getType().equals("cached")) { - throw new ElasticsearchIllegalArgumentException("generic thread pool must be of type cached"); + throw new IllegalArgumentException("generic thread pool must be of type cached"); } this.executors = ImmutableMap.copyOf(executors); this.scheduler = new ScheduledThreadPoolExecutor(1, EsExecutors.daemonThreadFactory(settings, "scheduler"), new EsAbortPolicy()); @@ -225,7 +224,7 @@ public class ThreadPool extends AbstractComponent { public Executor executor(String name) { Executor executor = executors.get(name).executor(); if (executor == null) { - throw new ElasticsearchIllegalArgumentException("No executor found for [" + name + "]"); + throw new IllegalArgumentException("No executor found for [" + name + "]"); } return executor; } @@ -411,7 +410,7 @@ public class ThreadPool extends AbstractComponent { Executor executor = EsExecutors.newScaling(min, size, keepAlive.millis(), TimeUnit.MILLISECONDS, threadFactory); return new ExecutorHolder(executor, new Info(name, type, min, size, keepAlive, null)); } - throw new ElasticsearchIllegalArgumentException("No type found [" + type + "], for [" + name + "]"); + throw new IllegalArgumentException("No type found [" + type + "], for [" + name + "]"); } public void updateSettings(Settings settings) { diff --git a/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java b/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java index 739ad3040cf..8cbe6b4c960 100644 --- a/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java +++ b/src/main/java/org/elasticsearch/transport/PlainTransportFuture.java @@ -20,7 +20,6 @@ package org.elasticsearch.transport; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.common.util.concurrent.BaseFuture; @@ -40,12 +39,12 @@ public class PlainTransportFuture extends BaseFutur } @Override - public V txGet() throws ElasticsearchException { + public V txGet() { try { return get(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); - throw new ElasticsearchIllegalStateException("Future got interrupted", e); + throw new IllegalStateException("Future got interrupted", e); } catch (ExecutionException e) 
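txGet() in PlainTransportFuture keeps one subtle piece of behavior through this refactor: when get() is interrupted, the interrupt flag is restored on the current thread before rethrowing as an unchecked exception. A self-contained sketch of that idiom (the real code additionally unwraps ElasticsearchException causes, which is elided here):

```java
// Restore-the-interrupt idiom: swallowing InterruptedException without
// re-setting the flag would hide the interrupt from callers up the stack.
import java.util.concurrent.*;

public final class TxGetSketch {
    static <V> V txGet(Future<V> future) {
        try {
            return future.get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve the interrupt status
            throw new IllegalStateException("Future got interrupted", e);
        } catch (ExecutionException e) {
            throw new IllegalStateException(e.getCause()); // simplified unwrapping
        }
    }

    public static void main(String[] args) {
        System.out.println(txGet(CompletableFuture.completedFuture("done")));
    }
}
```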
{ if (e.getCause() instanceof ElasticsearchException) { throw (ElasticsearchException) e.getCause(); @@ -56,13 +55,13 @@ public class PlainTransportFuture extends BaseFutur } @Override - public V txGet(long timeout, TimeUnit unit) throws ElasticsearchException { + public V txGet(long timeout, TimeUnit unit) { try { return get(timeout, unit); } catch (TimeoutException e) { throw new ElasticsearchTimeoutException(e.getMessage()); } catch (InterruptedException e) { - throw new ElasticsearchIllegalStateException("Future got interrupted", e); + throw new IllegalStateException("Future got interrupted", e); } catch (ExecutionException e) { if (e.getCause() instanceof ElasticsearchException) { throw (ElasticsearchException) e.getCause(); diff --git a/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java b/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java new file mode 100644 index 00000000000..2b8caf8f055 --- /dev/null +++ b/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java @@ -0,0 +1,74 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.transport; + + +import java.lang.reflect.Constructor; + +/** + * + */ +public class RequestHandlerRegistry { + + private final String action; + private final Constructor requestConstructor; + private final TransportRequestHandler handler; + private final boolean forceExecution; + private final String executor; + + RequestHandlerRegistry(String action, Class request, TransportRequestHandler handler, + String executor, boolean forceExecution) { + this.action = action; + try { + this.requestConstructor = request.getDeclaredConstructor(); + } catch (NoSuchMethodException e) { + throw new IllegalStateException("failed to create constructor (does it have a default constructor?) 
for request " + request, e); + } + this.requestConstructor.setAccessible(true); + assert newRequest() != null; + this.handler = handler; + this.forceExecution = forceExecution; + this.executor = executor; + } + + public String getAction() { + return action; + } + + public Request newRequest() { + try { + return requestConstructor.newInstance(); + } catch (Exception e) { + throw new IllegalStateException("failed to instantiate request ", e); + } + } + + public TransportRequestHandler getHandler() { + return handler; + } + + public boolean isForceExecution() { + return forceExecution; + } + + public String getExecutor() { + return executor; + } +} diff --git a/src/main/java/org/elasticsearch/transport/TransportFuture.java b/src/main/java/org/elasticsearch/transport/TransportFuture.java index 1b330d2b1dc..c4bfcb7afea 100644 --- a/src/main/java/org/elasticsearch/transport/TransportFuture.java +++ b/src/main/java/org/elasticsearch/transport/TransportFuture.java @@ -33,12 +33,12 @@ public interface TransportFuture extends Future { * Waits if necessary for the computation to complete, and then * retrieves its result. */ - V txGet() throws ElasticsearchException; + V txGet(); /** * Waits if necessary for at most the given time for the computation * to complete, and then retrieves its result, if available. */ - V txGet(long timeout, TimeUnit unit) throws ElasticsearchException; + V txGet(long timeout, TimeUnit unit); } diff --git a/src/main/java/org/elasticsearch/transport/TransportModule.java b/src/main/java/org/elasticsearch/transport/TransportModule.java index 484c65ec622..773d7d2296e 100644 --- a/src/main/java/org/elasticsearch/transport/TransportModule.java +++ b/src/main/java/org/elasticsearch/transport/TransportModule.java @@ -19,18 +19,15 @@ package org.elasticsearch.transport; -import org.elasticsearch.action.admin.cluster.node.liveness.TransportLivenessAction; +import com.google.common.base.Preconditions; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.plugins.Plugin; import org.elasticsearch.transport.local.LocalTransport; import org.elasticsearch.transport.netty.NettyTransport; -import static org.elasticsearch.common.Preconditions.checkNotNull; - /** * */ @@ -78,15 +75,15 @@ public class TransportModule extends AbstractModule { } public void setTransportService(Class transportService, String source) { - checkNotNull(transportService, "Configured transport service may not be null"); - checkNotNull(source, "Plugin, that changes transport service may not be null"); + Preconditions.checkNotNull(transportService, "Configured transport service may not be null"); + Preconditions.checkNotNull(source, "Plugin, that changes transport service may not be null"); this.configuredTransportService = transportService; this.configuredTransportServiceSource = source; } public void setTransport(Class transport, String source) { - checkNotNull(transport, "Configured transport may not be null"); - checkNotNull(source, "Plugin, that changes transport may not be null"); + Preconditions.checkNotNull(transport, "Configured transport may not be null"); + Preconditions.checkNotNull(source, "Plugin, that changes transport may not be null"); this.configuredTransport = transport; this.configuredTransportSource = source; } diff --git 
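RequestHandlerRegistry, added above, resolves the request class's declared no-arg constructor once at registration, makes it accessible, and eagerly instantiates a throwaway request (the assert) so a missing default constructor surfaces at startup rather than on the first inbound message. The same trick in isolation:

```java
// Cache the declared no-arg constructor up front and fail fast with
// IllegalStateException if it is absent, as RequestHandlerRegistry does.
import java.lang.reflect.Constructor;

public final class ProtoFactory<T> {
    private final Constructor<T> ctor;

    ProtoFactory(Class<T> clazz) {
        try {
            this.ctor = clazz.getDeclaredConstructor();
        } catch (NoSuchMethodException e) {
            throw new IllegalStateException(
                    "failed to create constructor (does it have a default constructor?) for " + clazz, e);
        }
        this.ctor.setAccessible(true); // allow non-public request constructors
    }

    T newInstance() {
        try {
            return ctor.newInstance();
        } catch (Exception e) {
            throw new IllegalStateException("failed to instantiate " + ctor.getDeclaringClass(), e);
        }
    }

    public static void main(String[] args) {
        System.out.println(new ProtoFactory<>(StringBuilder.class).newInstance().append("ok"));
    }
}
```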
a/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java b/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java index fd62f30418b..5b5e58de06d 100644 --- a/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java +++ b/src/main/java/org/elasticsearch/transport/TransportRequestHandler.java @@ -24,14 +24,5 @@ package org.elasticsearch.transport; */ public interface TransportRequestHandler { - T newInstance(); - void messageReceived(T request, TransportChannel channel) throws Exception; - - String executor(); - - /** - * See {@link org.elasticsearch.common.util.concurrent.AbstractRunnable#isForceExecution()}. - */ - boolean isForceExecution(); } diff --git a/src/main/java/org/elasticsearch/transport/TransportRequestOptions.java b/src/main/java/org/elasticsearch/transport/TransportRequestOptions.java index 5452b020be0..0d92d00f144 100644 --- a/src/main/java/org/elasticsearch/transport/TransportRequestOptions.java +++ b/src/main/java/org/elasticsearch/transport/TransportRequestOptions.java @@ -19,7 +19,6 @@ package org.elasticsearch.transport; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.unit.TimeValue; /** @@ -52,7 +51,7 @@ public class TransportRequestOptions { } else if ("ping".equalsIgnoreCase(type)) { return PING; } else { - throw new ElasticsearchIllegalArgumentException("failed to match transport type for [" + type + "]"); + throw new IllegalArgumentException("failed to match transport type for [" + type + "]"); } } } diff --git a/src/main/java/org/elasticsearch/transport/TransportService.java b/src/main/java/org/elasticsearch/transport/TransportService.java index 975f669a592..7b635d0f851 100644 --- a/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/src/main/java/org/elasticsearch/transport/TransportService.java @@ -21,7 +21,6 @@ package org.elasticsearch.transport; import com.google.common.collect.ImmutableMap; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.settings.ClusterDynamicSettings; import org.elasticsearch.cluster.settings.DynamicSettings; @@ -61,8 +60,8 @@ public class TransportService extends AbstractLifecycleComponent serverHandlers = ImmutableMap.of(); - final Object serverHandlersMutex = new Object(); + volatile ImmutableMap requestHandlers = ImmutableMap.of(); + final Object requestHandlerMutex = new Object(); final ConcurrentMapLong clientHandlers = ConcurrentCollections.newConcurrentMapLongWithAggressiveConcurrency(); @@ -160,7 +159,7 @@ public class TransportService extends AbstractLifecycleComponent void sendRequest(final DiscoveryNode node, final String action, final TransportRequest request, final TransportRequestOptions options, TransportResponseHandler handler) { if (node == null) { - throw new ElasticsearchIllegalStateException("can't send request to a null node"); + throw new IllegalStateException("can't send request to a null node"); } final long requestId = newRequestId(); final TimeoutHandler timeoutHandler; @@ -324,25 +323,25 @@ public class TransportService extends AbstractLifecycleComponent void registerRequestHandler(String action, Class request, String executor, TransportRequestHandler handler) { + registerRequestHandler(action, request, executor, false, handler); + } + + /** + * Registers a new request handler + * @param action The action the request handler is associated with + * @param 
request The request class that will be used to constrcut new instances for streaming + * @param executor The executor the request handling will be executed on + * @param forceExecution Force execution on the executor queue and never reject it + * @param handler The handler itself that implements the request handling + */ + public void registerRequestHandler(String action, Class request, String executor, boolean forceExecution, TransportRequestHandler handler) { + synchronized (requestHandlerMutex) { + RequestHandlerRegistry reg = new RequestHandlerRegistry<>(action, request, handler, executor, forceExecution); + RequestHandlerRegistry replaced = requestHandlers.get(reg.getAction()); + requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); + if (replaced != null) { + logger.warn("registered two transport handlers for action {}, handlers: {}, {}", reg.getAction(), reg.getHandler(), replaced.getHandler()); } } } public void removeHandler(String action) { - synchronized (serverHandlersMutex) { - serverHandlers = MapBuilder.newMapBuilder(serverHandlers).remove(action).immutableMap(); + synchronized (requestHandlerMutex) { + requestHandlers = MapBuilder.newMapBuilder(requestHandlers).remove(action).immutableMap(); } } - protected TransportRequestHandler getHandler(String action) { - return serverHandlers.get(action); + protected RequestHandlerRegistry getRequestHandler(String action) { + return requestHandlers.get(action); } protected class Adapter implements TransportServiceAdapter { @@ -460,8 +479,8 @@ public class TransportService extends AbstractLifecycleComponent implem } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { String address = settings.get(TRANSPORT_LOCAL_ADDRESS); if (address == null) { address = Long.toString(transportAddressIdGenerator.incrementAndGet()); @@ -110,7 +110,7 @@ public class LocalTransport extends AbstractLifecycleComponent implem } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { transports.remove(localAddress); // now, go over all the transports connected to me, and raise disconnected event for (final LocalTransport targetTransport : transports.values()) { @@ -123,7 +123,7 @@ public class LocalTransport extends AbstractLifecycleComponent implem } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { ThreadPool.terminate(workers, 10, TimeUnit.SECONDS); } @@ -262,27 +262,27 @@ public class LocalTransport extends AbstractLifecycleComponent implem transportServiceAdapter.onRequestReceived(requestId, action); final LocalTransportChannel transportChannel = new LocalTransportChannel(this, transportServiceAdapter, sourceTransport, action, requestId, version); try { - final TransportRequestHandler handler = transportServiceAdapter.handler(action); - if (handler == null) { + final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action); + if (reg == null) { throw new ActionNotFoundTransportException("Action [" + action + "] not found"); } - final TransportRequest request = handler.newInstance(); + final TransportRequest request = reg.newRequest(); request.remoteAddress(sourceTransport.boundAddress.publishAddress()); request.readFrom(stream); - if (ThreadPool.Names.SAME.equals(handler.executor())) { + if (ThreadPool.Names.SAME.equals(reg.getExecutor())) { //noinspection unchecked - handler.messageReceived(request, transportChannel); + 
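registerRequestHandler publishes handlers with a copy-on-write map: reads go lock-free through a volatile reference to an immutable map, while writers synchronize on a mutex, copy, mutate the copy, and republish. A plain-JDK sketch of the pattern — Collections.unmodifiableMap stands in for the Guava ImmutableMap/MapBuilder used in the patch:

```java
// Copy-on-write registration: cheap concurrent reads on the hot dispatch
// path, with mutation cost paid only at (rare) registration time.
import java.util.*;

public final class HandlerRegistry {
    private volatile Map<String, Runnable> handlers = Collections.emptyMap();
    private final Object mutex = new Object();

    void register(String action, Runnable handler) {
        synchronized (mutex) {
            Map<String, Runnable> copy = new HashMap<>(handlers);
            Runnable replaced = copy.put(action, handler);
            handlers = Collections.unmodifiableMap(copy); // publish new snapshot
            if (replaced != null) {
                System.out.println("registered two transport handlers for action " + action);
            }
        }
    }

    Runnable get(String action) {
        return handlers.get(action); // no lock on the read path
    }

    public static void main(String[] args) {
        HandlerRegistry registry = new HandlerRegistry();
        registry.register("internal:ping", () -> System.out.println("pong"));
        registry.get("internal:ping").run();
        registry.register("internal:ping", () -> {}); // triggers the replaced warning
    }
}
```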
reg.getHandler().messageReceived(request, transportChannel); } else { - threadPool.executor(handler.executor()).execute(new AbstractRunnable() { + threadPool.executor(reg.getExecutor()).execute(new AbstractRunnable() { @Override protected void doRun() throws Exception { //noinspection unchecked - handler.messageReceived(request, transportChannel); + reg.getHandler().messageReceived(request, transportChannel); } @Override public boolean isForceExecution() { - return handler.isForceExecution(); + return reg.isForceExecution(); } @Override diff --git a/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java b/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java index 426030aef2a..b0689df6f2c 100644 --- a/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java +++ b/src/main/java/org/elasticsearch/transport/netty/MessageChannelHandler.java @@ -19,7 +19,6 @@ package org.elasticsearch.transport.netty; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.common.component.Lifecycle; import org.elasticsearch.common.compress.Compressor; @@ -101,7 +100,7 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { sb.append(buffer.getByte(offset + i)).append(","); } sb.append("]"); - throw new ElasticsearchIllegalStateException(sb.toString()); + throw new IllegalStateException(sb.toString()); } wrappedStream = compressor.streamInput(streamIn); } else { @@ -209,18 +208,18 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { transportServiceAdapter.onRequestReceived(requestId, action); final NettyTransportChannel transportChannel = new NettyTransportChannel(transport, transportServiceAdapter, action, channel, requestId, version, profileName); try { - final TransportRequestHandler handler = transportServiceAdapter.handler(action); - if (handler == null) { + final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action); + if (reg == null) { throw new ActionNotFoundTransportException(action); } - final TransportRequest request = handler.newInstance(); + final TransportRequest request = reg.newRequest(); request.remoteAddress(new InetSocketTransportAddress((InetSocketAddress) channel.getRemoteAddress())); request.readFrom(buffer); - if (ThreadPool.Names.SAME.equals(handler.executor())) { + if (ThreadPool.Names.SAME.equals(reg.getExecutor())) { //noinspection unchecked - handler.messageReceived(request, transportChannel); + reg.getHandler().messageReceived(request, transportChannel); } else { - threadPool.executor(handler.executor()).execute(new RequestHandler(handler, request, transportChannel, action)); + threadPool.executor(reg.getExecutor()).execute(new RequestHandler(reg, request, transportChannel)); } } catch (Throwable e) { try { @@ -260,27 +259,25 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { } class RequestHandler extends AbstractRunnable { - private final TransportRequestHandler handler; + private final RequestHandlerRegistry reg; private final TransportRequest request; private final NettyTransportChannel transportChannel; - private final String action; - public RequestHandler(TransportRequestHandler handler, TransportRequest request, NettyTransportChannel transportChannel, String action) { - this.handler = handler; + public RequestHandler(RequestHandlerRegistry reg, TransportRequest request, NettyTransportChannel transportChannel) { + this.reg = reg; this.request = request; 
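Both transports now make the same dispatch decision off the registry: a handler registered for ThreadPool.Names.SAME runs inline on the I/O thread, anything else is forked onto the named pool. A self-contained model of that branch (executor names and the registry shape are simplified stand-ins):

```java
// SAME-vs-pool dispatch as in LocalTransport and MessageChannelHandler above:
// inline execution avoids a thread hop for trivial handlers, at the price of
// blocking the I/O thread if the handler is slow.
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public final class DispatchSketch {
    static final String SAME = "same";

    static void dispatch(String executorName, ExecutorService pool, Runnable handler) {
        if (SAME.equals(executorName)) {
            handler.run(); // inline on the calling (I/O) thread
        } else {
            pool.execute(handler); // forked onto the registered pool
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ExecutorService generic = Executors.newCachedThreadPool();
        dispatch(SAME, generic, () -> System.out.println("inline on " + Thread.currentThread().getName()));
        dispatch("generic", generic, () -> System.out.println("pooled on " + Thread.currentThread().getName()));
        generic.shutdown();
        generic.awaitTermination(5, TimeUnit.SECONDS);
    }
}
```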
this.transportChannel = transportChannel; - this.action = action; } @SuppressWarnings({"unchecked"}) @Override protected void doRun() throws Exception { - handler.messageReceived(request, transportChannel); + reg.getHandler().messageReceived(request, transportChannel); } @Override public boolean isForceExecution() { - return handler.isForceExecution(); + return reg.isForceExecution(); } @Override @@ -290,7 +287,7 @@ public class MessageChannelHandler extends SimpleChannelUpstreamHandler { try { transportChannel.sendResponse(e); } catch (Throwable e1) { - logger.warn("Failed to send error message back to client for action [" + action + "]", e1); + logger.warn("Failed to send error message back to client for action [" + reg.getAction() + "]", e1); logger.warn("Actual Exception", e); } } diff --git a/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java b/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java index 1d9914e4b81..e2ad4efe3f5 100644 --- a/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java +++ b/src/main/java/org/elasticsearch/transport/netty/NettyTransport.java @@ -184,13 +184,13 @@ public class NettyTransport extends AbstractLifecycleComponent implem // we want to have at least 1 for reg/state/ping if (this.connectionsPerNodeReg == 0) { - throw new ElasticsearchIllegalArgumentException("can't set [connection_per_node.reg] to 0"); + throw new IllegalArgumentException("can't set [connection_per_node.reg] to 0"); } if (this.connectionsPerNodePing == 0) { - throw new ElasticsearchIllegalArgumentException("can't set [connection_per_node.ping] to 0"); + throw new IllegalArgumentException("can't set [connection_per_node.ping] to 0"); } if (this.connectionsPerNodeState == 0) { - throw new ElasticsearchIllegalArgumentException("can't set [connection_per_node.state] to 0"); + throw new IllegalArgumentException("can't set [connection_per_node.state] to 0"); } long defaultReceiverPredictor = 512 * 1024; @@ -234,7 +234,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { boolean success = false; try { clientBootstrap = createClientBootstrap(); @@ -487,7 +487,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { final CountDownLatch latch = new CountDownLatch(1); // make sure we run it on another thread than a possible IO handler thread threadPool.generic().execute(new Runnable() { @@ -559,7 +559,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { } @Override @@ -736,7 +736,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem public void connectToNode(DiscoveryNode node, boolean light) { if (!lifecycle.started()) { - throw new ElasticsearchIllegalStateException("can't add nodes to a stopped transport"); + throw new IllegalStateException("can't add nodes to a stopped transport"); } if (node == null) { throw new ConnectTransportException(null, "can't connect to a null node"); @@ -746,7 +746,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem connectionLock.acquire(node.id()); try { if (!lifecycle.started()) { - throw new ElasticsearchIllegalStateException("can't add nodes to a stopped transport"); + throw new IllegalStateException("can't add nodes 
to a stopped transport"); } NodeChannels nodeChannels = connectedNodes.get(node); if (nodeChannels != null) { @@ -1107,7 +1107,7 @@ public class NettyTransport extends AbstractLifecycleComponent implem } else if (type == TransportRequestOptions.Type.RECOVERY) { return recovery[MathUtils.mod(recoveryCounter.incrementAndGet(), recovery.length)]; } else { - throw new ElasticsearchIllegalArgumentException("no type channel for [" + type + "]"); + throw new IllegalArgumentException("no type channel for [" + type + "]"); } } diff --git a/src/main/java/org/elasticsearch/tribe/TribeService.java b/src/main/java/org/elasticsearch/tribe/TribeService.java index 90989561eff..51cb9d0ecab 100644 --- a/src/main/java/org/elasticsearch/tribe/TribeService.java +++ b/src/main/java/org/elasticsearch/tribe/TribeService.java @@ -73,7 +73,7 @@ import java.util.Set; */ public class TribeService extends AbstractLifecycleComponent { - public static final ClusterBlock TRIBE_METADATA_BLOCK = new ClusterBlock(10, "tribe node, metadata not allowed", false, false, RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.METADATA)); + public static final ClusterBlock TRIBE_METADATA_BLOCK = new ClusterBlock(10, "tribe node, metadata not allowed", false, false, RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.METADATA_READ, ClusterBlockLevel.METADATA_WRITE)); public static final ClusterBlock TRIBE_WRITE_BLOCK = new ClusterBlock(11, "tribe node, write not allowed", false, false, RestStatus.BAD_REQUEST, EnumSet.of(ClusterBlockLevel.WRITE)); public static Settings processSettings(Settings settings) { @@ -164,7 +164,7 @@ public class TribeService extends AbstractLifecycleComponent { } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { for (Node node : nodes) { try { node.start(); @@ -186,12 +186,12 @@ public class TribeService extends AbstractLifecycleComponent { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { doClose(); } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { for (Node node : nodes) { try { node.close(); diff --git a/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java b/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java index 0e59fdae14c..90e37447bb5 100644 --- a/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java +++ b/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java @@ -19,7 +19,6 @@ package org.elasticsearch.watcher; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; @@ -92,7 +91,7 @@ public class ResourceWatcherService extends AbstractLifecycleComponent", "", new DefaultEncoder()), fieldValues, false, Integer.MAX_VALUE - 1, 0); highlighter.setBreakIterator(new WholeBreakIterator()); - Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is a test. 
Just a test highlighting from postings highlighter.")); - snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is the second value to perform highlighting on.")); - snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is the third value to test highlighting with postings.")); @@ -106,15 +106,15 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { //let's try without whole break iterator as well, to prove that highlighting works the same when working per value (not optimized though) highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("", "", new DefaultEncoder()), fieldValues, false, Integer.MAX_VALUE - 1, 0); - snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("Just a test highlighting from postings highlighter.")); - snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is the second value to perform highlighting on.")); - snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is the third value to test highlighting with postings.")); @@ -177,7 +177,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { Query query = new TermQuery(new Term("body", "highlighting")); BytesRef[] queryTerms = filterTerms(extractTerms(searcher, query), "body", true); - TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); + TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertThat(topDocs.totalHits, equalTo(1)); int docId = topDocs.scoreDocs[0].doc; @@ -190,7 +190,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { boolean mergeValues = true; CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("", "", new DefaultEncoder()), fieldValues, mergeValues, Integer.MAX_VALUE-1, 0); - Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(4)); @@ -205,7 +205,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("", "", new DefaultEncoder()), fieldValues, mergeValues, Integer.MAX_VALUE-1, 0); List snippets2 = new ArrayList<>(); for (int i = 0; i < fieldValues.size(); i++) { - snippets2.addAll(Arrays.asList(highlighter.highlightDoc("body", queryTerms, searcher, docId, 5))); + snippets2.addAll(Arrays.asList(highlighter.highlightDoc("body", queryTerms, ir, docId, 5))); } assertThat(snippets2.size(), equalTo(4)); @@ -292,7 +292,7 @@ public class CustomPostingsHighlighterTests extends 
ElasticsearchTestCase { Query query = new TermQuery(new Term("body", "highlighting")); BytesRef[] queryTerms = filterTerms(extractTerms(searcher, query), "body", true); - TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); + TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertThat(topDocs.totalHits, equalTo(1)); int docId = topDocs.scoreDocs[0].doc; @@ -305,7 +305,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { boolean mergeValues = true; CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(new CustomPassageFormatter("", "", new DefaultEncoder()), fieldValues, mergeValues, Integer.MAX_VALUE-1, 0); - Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, searcher, docId, 5); + Snippet[] snippets = highlighter.highlightDoc("body", queryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(4)); @@ -379,7 +379,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { Query query = new TermQuery(new Term("none", "highlighting")); IndexSearcher searcher = newSearcher(ir); SortedSet queryTerms = extractTerms(searcher, query); - TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); + TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertThat(topDocs.totalHits, equalTo(1)); int docId = topDocs.scoreDocs[0].doc; @@ -392,7 +392,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { //no snippets with simulated require field match (we filter the terms ourselves) boolean requireFieldMatch = true; BytesRef[] filteredQueryTerms = filterTerms(queryTerms, "body", requireFieldMatch); - Snippet[] snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5); + Snippet[] snippets = highlighter.highlightDoc("body", filteredQueryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(0)); @@ -400,7 +400,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { //one snippet without require field match, just passing in the query terms with no filtering on our side requireFieldMatch = false; filteredQueryTerms = filterTerms(queryTerms, "body", requireFieldMatch); - snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", filteredQueryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("Just a test highlighting from postings.")); @@ -435,7 +435,7 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { IndexSearcher searcher = newSearcher(ir); SortedSet queryTerms = extractTerms(searcher, query); - TopDocs topDocs = searcher.search(query, null, 10, Sort.INDEXORDER); + TopDocs topDocs = searcher.search(query, 10, Sort.INDEXORDER); assertThat(topDocs.totalHits, equalTo(1)); int docId = topDocs.scoreDocs[0].doc; @@ -446,11 +446,11 @@ public class CustomPostingsHighlighterTests extends ElasticsearchTestCase { CustomPassageFormatter passageFormatter = new CustomPassageFormatter("", "", new DefaultEncoder()); CustomPostingsHighlighter highlighter = new CustomPostingsHighlighter(passageFormatter, values, true, Integer.MAX_VALUE - 1, 0); - Snippet[] snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5); + Snippet[] snippets = highlighter.highlightDoc("body", filteredQueryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(0)); highlighter = new CustomPostingsHighlighter(passageFormatter, values, true, Integer.MAX_VALUE 
- 1, scaledRandomIntBetween(1, 10)); - snippets = highlighter.highlightDoc("body", filteredQueryTerms, searcher, docId, 5); + snippets = highlighter.highlightDoc("body", filteredQueryTerms, ir, docId, 5); assertThat(snippets.length, equalTo(1)); assertThat(snippets[0].getText(), equalTo("This is a test.")); diff --git a/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java b/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java index b20e544866d..7bd77155962 100644 --- a/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java +++ b/src/test/java/org/apache/lucene/search/postingshighlight/XPostingsHighlighterTests.java @@ -26,8 +26,6 @@ import org.apache.lucene.index.*; import org.apache.lucene.search.*; import org.apache.lucene.search.highlight.DefaultEncoder; import org.apache.lucene.store.Directory; -import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.TestUtil; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; diff --git a/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java b/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java index 2c605f889aa..16914ab6eef 100644 --- a/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java +++ b/src/test/java/org/elasticsearch/ElasticsearchExceptionTests.java @@ -19,13 +19,28 @@ package org.elasticsearch; +import org.elasticsearch.action.search.SearchPhaseExecutionException; +import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.common.io.stream.BytesStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexException; +import org.elasticsearch.index.query.QueryParsingException; +import org.elasticsearch.index.query.TestQueryParsingException; import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.test.ElasticsearchTestCase; import org.elasticsearch.transport.RemoteTransportException; import org.junit.Test; +import java.io.EOFException; +import java.io.FileNotFoundException; +import java.io.IOException; + import static org.hamcrest.Matchers.equalTo; public class ElasticsearchExceptionTests extends ElasticsearchTestCase { @@ -43,5 +58,179 @@ public class ElasticsearchExceptionTests extends ElasticsearchTestCase { exception = new RemoteTransportException("test", new IndexMissingException(new Index("test"))); assertThat(exception.status(), equalTo(RestStatus.NOT_FOUND)); + + exception = new RemoteTransportException("test", new IllegalArgumentException("foobar")); + assertThat(exception.status(), equalTo(RestStatus.BAD_REQUEST)); + + exception = new RemoteTransportException("test", new IllegalStateException("foobar")); + assertThat(exception.status(), equalTo(RestStatus.INTERNAL_SERVER_ERROR)); } + + public void testGuessRootCause() { + { + ElasticsearchException exception = new ElasticsearchException("foo", new ElasticsearchException("bar", new IndexException(new Index("foo"), "index is closed", new RuntimeException("foobar")))); + ElasticsearchException[] rootCauses = exception.guessRootCauses(); + assertEquals(rootCauses.length, 1); + 
assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "index_exception"); + assertEquals(rootCauses[0].getMessage(), "index is closed"); + ShardSearchFailure failure = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null), + new SearchShardTarget("node_1", "foo", 1)); + ShardSearchFailure failure1 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null), + new SearchShardTarget("node_1", "foo", 2)); + SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1}); + if (randomBoolean()) { + rootCauses = (randomBoolean() ? new RemoteTransportException("remoteboom", ex) : ex).guessRootCauses(); + } else { + rootCauses = ElasticsearchException.guessRootCauses(randomBoolean() ? new RemoteTransportException("remoteboom", ex) : ex); + } + assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "test_query_parsing_exception"); + assertEquals(rootCauses[0].getMessage(), "foobar"); + + ElasticsearchException oneLevel = new ElasticsearchException("foo", new RuntimeException("foobar")); + rootCauses = oneLevel.guessRootCauses(); + assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "exception"); + assertEquals(rootCauses[0].getMessage(), "foo"); + } + { + ShardSearchFailure failure = new ShardSearchFailure( + new TestQueryParsingException(new Index("foo"), 1, 2, "foobar", null), + new SearchShardTarget("node_1", "foo", 1)); + ShardSearchFailure failure1 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo1"), 1, 2, "foobar", null), + new SearchShardTarget("node_1", "foo1", 1)); + ShardSearchFailure failure2 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo1"), 1, 2, "foobar", null), + new SearchShardTarget("node_1", "foo1", 2)); + SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1, failure2}); + final ElasticsearchException[] rootCauses = ex.guessRootCauses(); + assertEquals(rootCauses.length, 2); + assertEquals(ElasticsearchException.getExceptionName(rootCauses[0]), "test_query_parsing_exception"); + assertEquals(rootCauses[0].getMessage(), "foobar"); + assertEquals(((QueryParsingException)rootCauses[0]).index().name(), "foo"); + assertEquals(ElasticsearchException.getExceptionName(rootCauses[1]), "test_query_parsing_exception"); + assertEquals(rootCauses[1].getMessage(), "foobar"); + assertEquals(((QueryParsingException) rootCauses[1]).getLineNumber(), 1); + assertEquals(((QueryParsingException) rootCauses[1]).getColumnNumber(), 2); + + } + + { + final ElasticsearchException[] foobars = ElasticsearchException.guessRootCauses(new IllegalArgumentException("foobar")); + assertEquals(foobars.length, 1); + assertTrue(foobars[0] instanceof ElasticsearchException); + assertEquals(foobars[0].getMessage(), "foobar"); + assertEquals(foobars[0].getCause().getClass(), IllegalArgumentException.class); + assertEquals(foobars[0].getExceptionName(), "illegal_argument_exception"); + } + + } + + public void testDeduplicate() throws IOException { + { + ShardSearchFailure failure = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null), + new SearchShardTarget("node_1", "foo", 1)); + ShardSearchFailure failure1 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null), + new SearchShardTarget("node_1", "foo", 2)); + 
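The expected counts in the assertions below hinge on how shard failures collapse into root causes: failures carrying the same underlying parsing error on the same index are treated as one group, so two failures on `foo` yield a single cause while `foo1` contributes its own. A hedged sketch of that grouping, assuming a composite key of exception class + message + index (the real logic lives in SearchPhaseExecutionException / ElasticsearchException.guessRootCauses()):

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Illustration only: the composite grouping key below is an assumption.
final class FailureGrouping {
    static List<Throwable> groupByCause(List<Throwable> causes, List<String> indices) {
        List<Throwable> unique = new ArrayList<>();
        Set<String> seen = new HashSet<>();
        for (int i = 0; i < causes.size(); i++) {
            String key = causes.get(i).getClass().getName()
                    + "|" + causes.get(i).getMessage()
                    + "|" + indices.get(i);
            if (seen.add(key)) {
                unique.add(causes.get(i)); // keep one representative per group
            }
        }
        return unique;
    }
}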
SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1}); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + ex.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + String expected = "{\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}}]}"; + assertEquals(expected, builder.string()); + } + { + ShardSearchFailure failure = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null), + new SearchShardTarget("node_1", "foo", 1)); + ShardSearchFailure failure1 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo1"), "foobar", null), + new SearchShardTarget("node_1", "foo1", 1)); + ShardSearchFailure failure2 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo1"), "foobar", null), + new SearchShardTarget("node_1", "foo1", 2)); + SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[]{failure, failure1, failure2}); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + ex.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + String expected = "{\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}},{\"shard\":1,\"index\":\"foo1\",\"node\":\"node_1\",\"reason\":{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo1\"}}]}"; + assertEquals(expected, builder.string()); + } + } + + public void testGetRootCause() { + Exception root = new RuntimeException("foobar"); + ElasticsearchException exception = new ElasticsearchException("foo", new ElasticsearchException("bar", new IllegalArgumentException("index is closed", root))); + assertEquals(root, exception.getRootCause()); + assertTrue(exception.contains(RuntimeException.class)); + assertFalse(exception.contains(EOFException.class)); + } + + public void testToString() { + ElasticsearchException exception = new ElasticsearchException("foo", new ElasticsearchException("bar", new IllegalArgumentException("index is closed", new RuntimeException("foobar")))); + assertEquals("ElasticsearchException[foo]; nested: ElasticsearchException[bar]; nested: IllegalArgumentException[index is closed]; nested: RuntimeException[foobar];", exception.toString()); + } + + public void testToXContent() throws IOException { + { + ElasticsearchException ex = new ElasticsearchException("foo", new ElasticsearchException("bar", new IllegalArgumentException("index is closed", new RuntimeException("foobar")))); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + ex.toXContent(builder, ToXContent.EMPTY_PARAMS); + builder.endObject(); + + String expected = "{\"type\":\"exception\",\"reason\":\"foo\",\"caused_by\":{\"type\":\"exception\",\"reason\":\"bar\",\"caused_by\":{\"type\":\"illegal_argument_exception\",\"reason\":\"index is closed\",\"caused_by\":{\"type\":\"runtime_exception\",\"reason\":\"foobar\"}}}}"; + assertEquals(expected, builder.string()); + } + 
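The expected strings in this test spell out the rendering contract: every throwable becomes an object with a snake_cased "type" and its message as "reason", and causes nest recursively under "caused_by" (thin wrappers such as RemoteTransportException are skipped entirely, as the next block shows). A minimal sketch of that shape; the method names and the escaping shortcut are illustrative only:

// Sketch of the {"type": ..., "reason": ..., "caused_by": {...}} nesting asserted
// above; JSON escaping is omitted and the snake_casing is approximate (per the
// tests, the real naming also maps the base ElasticsearchException to just "exception").
final class ExceptionJson {
    static String render(Throwable t) {
        StringBuilder sb = new StringBuilder();
        sb.append("{\"type\":\"").append(snakeCase(t))
          .append("\",\"reason\":\"").append(t.getMessage()).append('"');
        if (t.getCause() != null) {
            sb.append(",\"caused_by\":").append(render(t.getCause()));
        }
        return sb.append('}').toString();
    }

    // e.g. IllegalArgumentException -> illegal_argument_exception
    static String snakeCase(Throwable t) {
        return t.getClass().getSimpleName()
                .replaceAll("([a-z0-9])([A-Z])", "$1_$2")
                .toLowerCase();
    }
}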
+ { + Exception ex = new FileNotFoundException("foo not found"); + if (randomBoolean()) { + // just a wrapper which is omitted + ex = new RemoteTransportException("foobar", ex); + } + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + ElasticsearchException.toXContent(builder, ToXContent.EMPTY_PARAMS, ex); + builder.endObject(); + + String expected = "{\"type\":\"file_not_found_exception\",\"reason\":\"foo not found\"}"; + assertEquals(expected, builder.string()); + } + + { + QueryParsingException ex = new TestQueryParsingException(new Index("foo"), 1, 2, "foobar", null); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + ElasticsearchException.toXContent(builder, ToXContent.EMPTY_PARAMS, ex); + builder.endObject(); + String expected = "{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"line\":1,\"col\":2,\"index\":\"foo\"}"; + assertEquals(expected, builder.string()); + } + + { // test equivalence + ElasticsearchException ex = new RemoteTransportException("foobar", new FileNotFoundException("foo not found")); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + ElasticsearchException.toXContent(builder, ToXContent.EMPTY_PARAMS, ex); + builder.endObject(); + + XContentBuilder otherBuilder = XContentFactory.jsonBuilder(); + + otherBuilder.startObject(); + ex.toXContent(otherBuilder, ToXContent.EMPTY_PARAMS); + otherBuilder.endObject(); + assertEquals(otherBuilder.string(), builder.string()); + } + } + + public void testSerializeElasticsearchException() throws IOException { + BytesStreamOutput out = new BytesStreamOutput(); + QueryParsingException ex = new TestQueryParsingException(new Index("foo"), 1, 2, "foobar", null); + out.writeThrowable(ex); + + BytesStreamInput in = new BytesStreamInput(out.bytes()); + QueryParsingException e = in.readThrowable(); + assertEquals(ex.index(), e.index()); + assertEquals(ex.getMessage(), e.getMessage()); + assertEquals(ex.getLineNumber(), e.getLineNumber()); + assertEquals(ex.getColumnNumber(), e.getColumnNumber()); + } + } \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/VersionTests.java b/src/test/java/org/elasticsearch/VersionTests.java index c78c3118e57..65d6a4f25e2 100644 --- a/src/test/java/org/elasticsearch/VersionTests.java +++ b/src/test/java/org/elasticsearch/VersionTests.java @@ -117,7 +117,7 @@ public class VersionTests extends ElasticsearchTestCase { Version.fromString("WRONG.VERSION"); } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testVersionNoPresentInSettings() { Version.indexCreated(ImmutableSettings.builder().build()); } diff --git a/src/test/java/org/elasticsearch/action/IndicesRequestTests.java b/src/test/java/org/elasticsearch/action/IndicesRequestTests.java index 4cf46150904..2fa6da48ce6 100644 --- a/src/test/java/org/elasticsearch/action/IndicesRequestTests.java +++ b/src/test/java/org/elasticsearch/action/IndicesRequestTests.java @@ -61,8 +61,6 @@ import org.elasticsearch.action.count.CountAction; import org.elasticsearch.action.count.CountRequest; import org.elasticsearch.action.delete.DeleteAction; import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.deletebyquery.DeleteByQueryAction; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequest; import org.elasticsearch.action.exists.ExistsAction; import org.elasticsearch.action.exists.ExistsRequest; import 
org.elasticsearch.action.explain.ExplainAction; @@ -117,7 +115,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.*; -@ClusterScope(scope = Scope.SUITE, numClientNodes = 1) +@ClusterScope(scope = Scope.SUITE, numClientNodes = 1, minNumDataNodes = 2) @Slow public class IndicesRequestTests extends ElasticsearchIntegrationTest { @@ -259,18 +257,6 @@ public class IndicesRequestTests extends ElasticsearchIntegrationTest { assertSameIndices(updateRequest, updateShardActions); } - @Test - public void testDeleteByQuery() { - String[] deleteByQueryShardActions = new String[]{DeleteByQueryAction.NAME + "[s]", DeleteByQueryAction.NAME + "[s][r]"}; - interceptTransportActions(deleteByQueryShardActions); - - DeleteByQueryRequest deleteByQueryRequest = new DeleteByQueryRequest(randomIndicesOrAliases()).source(new QuerySourceBuilder().setQuery(QueryBuilders.matchAllQuery())); - internalCluster().clientNodeClient().deleteByQuery(deleteByQueryRequest).actionGet(); - - clearInterceptedActions(); - assertSameIndices(deleteByQueryRequest, deleteByQueryShardActions); - } - @Test public void testBulk() { String[] bulkShardActions = new String[]{BulkAction.NAME + "[s]", BulkAction.NAME + "[s][r]"}; @@ -914,8 +900,8 @@ public class IndicesRequestTests extends ElasticsearchIntegrationTest { } @Override - public void registerHandler(String action, TransportRequestHandler handler) { - super.registerHandler(action, new InterceptingRequestHandler(action, handler)); + public void registerRequestHandler(String action, Class request, String executor, boolean forceExecution, TransportRequestHandler handler) { + super.registerRequestHandler(action, request, executor, forceExecution, new InterceptingRequestHandler(action, handler)); } private class InterceptingRequestHandler implements TransportRequestHandler { @@ -928,11 +914,6 @@ public class IndicesRequestTests extends ElasticsearchIntegrationTest { this.action = action; } - @Override - public TransportRequest newInstance() { - return requestHandler.newInstance(); - } - @Override public void messageReceived(TransportRequest request, TransportChannel channel) throws Exception { synchronized (InterceptingTransportService.this) { @@ -949,16 +930,6 @@ public class IndicesRequestTests extends ElasticsearchIntegrationTest { } requestHandler.messageReceived(request, channel); } - - @Override - public String executor() { - return requestHandler.executor(); - } - - @Override - public boolean isForceExecution() { - return requestHandler.isForceExecution(); - } } } } diff --git a/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksTests.java new file mode 100644 index 00000000000..a901b103516 --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/cluster/repositories/RepositoryBlocksTests.java @@ -0,0 +1,115 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.repositories;
+
+import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
+import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.hasSize;
+
+/**
+ * This class tests that repository operations (Put, Delete, Verify) are blocked when the cluster is read-only.
+ *
+ * The @ClusterScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only".
+ */
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class RepositoryBlocksTests extends ElasticsearchIntegrationTest {
+
+    @Test
+    public void testPutRepositoryWithBlocks() {
+        logger.info("--> registering a repository is blocked when the cluster is read only");
+        try {
+            setClusterReadOnly(true);
+            assertBlocked(client().admin().cluster().preparePutRepository("test-repo-blocks")
+                    .setType("fs")
+                    .setVerify(false)
+                    .setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())), MetaData.CLUSTER_READ_ONLY_BLOCK);
+        } finally {
+            setClusterReadOnly(false);
+        }
+
+        logger.info("--> registering a repository is allowed when the cluster is not read only");
+        assertAcked(client().admin().cluster().preparePutRepository("test-repo-blocks")
+                .setType("fs")
+                .setVerify(false)
+                .setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())));
+    }
+
+    @Test
+    public void testVerifyRepositoryWithBlocks() {
+        assertAcked(client().admin().cluster().preparePutRepository("test-repo-blocks")
+                .setType("fs")
+                .setVerify(false)
+                .setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir())));
+
+        // This test checks that the Verify Repository operation is never blocked, even if the cluster is read only.
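All of the *BlocksTests added in this patch (this file and the ones that follow) share one two-sided discipline worth distilling once: operations that must respect a block run under assertBlocked, while read-only-safe operations (get, verify, status) execute under the block and are asserted to succeed; the finally clause always clears the block so a failed assertion cannot leak a read-only cluster into later tests. A condensed sketch of both halves, reusing the repository operations from this file (not a new test, just the shared pattern):

// Condensed from the tests in this patch.
try {
    setClusterReadOnly(true); // installs MetaData.CLUSTER_READ_ONLY_BLOCK cluster-wide
    // Mutating operations must be rejected with the expected block:
    assertBlocked(client().admin().cluster().prepareDeleteRepository("test-repo-blocks"),
            MetaData.CLUSTER_READ_ONLY_BLOCK);
    // Read-only-safe operations still succeed while the block is active:
    GetRepositoriesResponse response =
            client().admin().cluster().prepareGetRepositories("test-repo-blocks").execute().actionGet();
    assertThat(response.repositories(), hasSize(1));
} finally {
    setClusterReadOnly(false); // always undo, or the block leaks into subsequent tests
}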
+ try { + setClusterReadOnly(true); + VerifyRepositoryResponse response = client().admin().cluster().prepareVerifyRepository("test-repo-blocks").execute().actionGet(); + assertThat(response.getNodes().length, equalTo(cluster().numDataAndMasterNodes())); + } finally { + setClusterReadOnly(false); + } + } + + @Test + public void testDeleteRepositoryWithBlocks() { + assertAcked(client().admin().cluster().preparePutRepository("test-repo-blocks") + .setType("fs") + .setVerify(false) + .setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir()))); + + logger.info("--> deleting a repository is blocked when the cluster is read only"); + try { + setClusterReadOnly(true); + assertBlocked(client().admin().cluster().prepareDeleteRepository("test-repo-blocks"), MetaData.CLUSTER_READ_ONLY_BLOCK); + } finally { + setClusterReadOnly(false); + } + + logger.info("--> deleting a repository is allowed when the cluster is not read only"); + assertAcked(client().admin().cluster().prepareDeleteRepository("test-repo-blocks")); + } + + @Test + public void testGetRepositoryWithBlocks() { + assertAcked(client().admin().cluster().preparePutRepository("test-repo-blocks") + .setType("fs") + .setVerify(false) + .setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir()))); + + // This test checks that the Get Repository operation is never blocked, even if the cluster is read only. + try { + setClusterReadOnly(true); + GetRepositoriesResponse response = client().admin().cluster().prepareGetRepositories("test-repo-blocks").execute().actionGet(); + assertThat(response.repositories(), hasSize(1)); + } finally { + setClusterReadOnly(false); + } + } +} diff --git a/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksTests.java new file mode 100644 index 00000000000..e6ad356c21c --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/SnapshotBlocksTests.java @@ -0,0 +1,159 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.admin.cluster.snapshots; + +import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryResponse; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Before; +import org.junit.Test; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; + +/** + * This class tests that snapshot operations (Create, Delete, Restore) are blocked when the cluster is read-only. + * + * The @ClusterScope TEST is needed because this class updates the cluster setting "cluster.blocks.read_only". + */ +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class SnapshotBlocksTests extends ElasticsearchIntegrationTest { + + protected static final String INDEX_NAME = "test-blocks"; + protected static final String REPOSITORY_NAME = "repo-" + INDEX_NAME; + protected static final String SNAPSHOT_NAME = "snapshot-0"; + + @Before + protected void setUpRepository() throws Exception { + createIndex(INDEX_NAME); + + int docs = between(10, 100); + for (int i = 0; i < docs; i++) { + client().prepareIndex(INDEX_NAME, "type").setSource("test", "init").execute().actionGet(); + } + + logger.info("--> register a repository"); + assertAcked(client().admin().cluster().preparePutRepository(REPOSITORY_NAME) + .setType("fs") + .setSettings(ImmutableSettings.settingsBuilder().put("location", createTempDir()))); + + logger.info("--> verify the repository"); + VerifyRepositoryResponse verifyResponse = client().admin().cluster().prepareVerifyRepository(REPOSITORY_NAME).get(); + assertThat(verifyResponse.getNodes().length, equalTo(cluster().numDataAndMasterNodes())); + + logger.info("--> create a snapshot"); + CreateSnapshotResponse snapshotResponse = client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME) + .setIncludeGlobalState(true) + .setWaitForCompletion(true) + .execute().actionGet(); + assertThat(snapshotResponse.status(), equalTo(RestStatus.OK)); + ensureSearchable(); + } + + @Test + public void testCreateSnapshotWithBlocks() { + logger.info("--> creating a snapshot is blocked when the cluster is read only"); + try { + setClusterReadOnly(true); + assertBlocked(client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1"), MetaData.CLUSTER_READ_ONLY_BLOCK); + } finally { + setClusterReadOnly(false); + } + + logger.info("--> creating a snapshot is allowed when the cluster is not read only"); + CreateSnapshotResponse response = client().admin().cluster().prepareCreateSnapshot(REPOSITORY_NAME, "snapshot-1") + .setWaitForCompletion(true) + .execute().actionGet(); + assertThat(response.status(), equalTo(RestStatus.OK)); + } + + @Test 
+    public void testDeleteSnapshotWithBlocks() {
+        logger.info("--> deleting a snapshot is blocked when the cluster is read only");
+        try {
+            setClusterReadOnly(true);
+            assertBlocked(client().admin().cluster().prepareDeleteSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME), MetaData.CLUSTER_READ_ONLY_BLOCK);
+        } finally {
+            setClusterReadOnly(false);
+        }
+
+        logger.info("--> deleting a snapshot is allowed when the cluster is not read only");
+        DeleteSnapshotResponse response = client().admin().cluster().prepareDeleteSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME).execute().actionGet();
+        assertThat(response.isAcknowledged(), equalTo(true));
+    }
+
+    @Test
+    public void testRestoreSnapshotWithBlocks() {
+        assertAcked(client().admin().indices().prepareDelete(INDEX_NAME));
+        assertFalse(client().admin().indices().prepareExists(INDEX_NAME).get().isExists());
+
+        logger.info("--> restoring a snapshot is blocked when the cluster is read only");
+        try {
+            setClusterReadOnly(true);
+            assertBlocked(client().admin().cluster().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME), MetaData.CLUSTER_READ_ONLY_BLOCK);
+        } finally {
+            setClusterReadOnly(false);
+        }
+
+        logger.info("--> restoring a snapshot is allowed when the cluster is not read only");
+        RestoreSnapshotResponse response = client().admin().cluster().prepareRestoreSnapshot(REPOSITORY_NAME, SNAPSHOT_NAME)
+                .setWaitForCompletion(true)
+                .execute().actionGet();
+        assertThat(response.status(), equalTo(RestStatus.OK));
+        assertTrue(client().admin().indices().prepareExists(INDEX_NAME).get().isExists());
+    }
+
+    @Test
+    public void testGetSnapshotWithBlocks() {
+        // This test checks that the Get Snapshot operation is never blocked, even if the cluster is read only.
+        try {
+            setClusterReadOnly(true);
+            GetSnapshotsResponse response = client().admin().cluster().prepareGetSnapshots(REPOSITORY_NAME).execute().actionGet();
+            assertThat(response.getSnapshots(), hasSize(1));
+            assertThat(response.getSnapshots().get(0).name(), equalTo(SNAPSHOT_NAME));
+        } finally {
+            setClusterReadOnly(false);
+        }
+    }
+
+    @Test
+    public void testSnapshotStatusWithBlocks() {
+        // This test checks that the Snapshot Status operation is never blocked, even if the cluster is read only.
+        try {
+            setClusterReadOnly(true);
+            SnapshotsStatusResponse response = client().admin().cluster().prepareSnapshotStatus(REPOSITORY_NAME)
+                    .setSnapshots(SNAPSHOT_NAME)
+                    .execute().actionGet();
+            assertThat(response.getSnapshots(), hasSize(1));
+            assertThat(response.getSnapshots().get(0).getState().completed(), equalTo(true));
+        } finally {
+            setClusterReadOnly(false);
+        }
+    }
+}
diff --git a/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksTests.java
new file mode 100644
index 00000000000..004f2c85daf
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/admin/cluster/tasks/PendingTasksBlocksTests.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.admin.cluster.tasks;
+
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.junit.Test;
+
+import java.util.Arrays;
+
+import static org.elasticsearch.cluster.metadata.IndexMetaData.*;
+
+@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST)
+public class PendingTasksBlocksTests extends ElasticsearchIntegrationTest {
+
+    @Test
+    public void testPendingTasksWithBlocks() {
+        createIndex("test");
+        ensureGreen("test");
+
+        // This test checks that the Pending Cluster Tasks operation is never blocked, whether an index block or the cluster read-only block is set.
+        for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
+            try {
+                enableIndexBlock("test", blockSetting);
+                PendingClusterTasksResponse response = client().admin().cluster().preparePendingClusterTasks().execute().actionGet();
+                assertNotNull(response.getPendingTasks());
+            } finally {
+                disableIndexBlock("test", blockSetting);
+            }
+        }
+
+        try {
+            setClusterReadOnly(true);
+            PendingClusterTasksResponse response = client().admin().cluster().preparePendingClusterTasks().execute().actionGet();
+            assertNotNull(response.getPendingTasks());
+        } finally {
+            setClusterReadOnly(false);
+        }
+    }
+}
diff --git a/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksTests.java
new file mode 100644
index 00000000000..b9d4b2e685c
--- /dev/null
+++ b/src/test/java/org/elasticsearch/action/admin/indices/cache/clear/ClearIndicesCacheBlocksTests.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.action.admin.indices.cache.clear; + +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.equalTo; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class ClearIndicesCacheBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testClearIndicesCacheWithBlocks() { + createIndex("test"); + ensureGreen("test"); + + NumShards numShards = getNumShards("test"); + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + enableIndexBlock("test", blockSetting); + ClearIndicesCacheResponse clearIndicesCacheResponse = client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setFilterCache(true).setIdCache(true).execute().actionGet(); + assertNoFailures(clearIndicesCacheResponse); + assertThat(clearIndicesCacheResponse.getSuccessfulShards(), equalTo(numShards.totalNumShards)); + } finally { + disableIndexBlock("test", blockSetting); + } + } + // Request is blocked + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test", blockSetting); + assertBlocked(client().admin().indices().prepareClearCache("test").setFieldDataCache(true).setFilterCache(true).setIdCache(true)); + } finally { + disableIndexBlock("test", blockSetting); + } + } + } +} diff --git a/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexTests.java b/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexTests.java index 638c9a61fb4..a8318101bf2 100644 --- a/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexTests.java +++ b/src/test/java/org/elasticsearch/action/admin/indices/create/CreateIndexTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.action.admin.indices.create; -import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -34,6 +33,7 @@ import org.junit.Test; import java.util.HashMap; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.hamcrest.Matchers.*; import static org.hamcrest.core.IsNull.notNullValue; @@ -106,38 +106,86 @@ public class CreateIndexTests extends ElasticsearchIntegrationTest{ public void testInvalidShardCountSettings() throws Exception { try { prepareCreate("test").setSettings(ImmutableSettings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(-10, 0)) - .build()) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(-10, 0)) + .build()) .get(); fail("should have thrown an exception about the primary shard count"); - } catch (ActionRequestValidationException e) { + } catch (IllegalArgumentException e) { assertThat("message contains error about shard count: " + e.getMessage(), e.getMessage().contains("index must have 1 or more primary shards"), equalTo(true)); } try { 
prepareCreate("test").setSettings(ImmutableSettings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(-10, -1)) - .build()) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(-10, -1)) + .build()) .get(); fail("should have thrown an exception about the replica shard count"); - } catch (ActionRequestValidationException e) { + } catch (IllegalArgumentException e) { assertThat("message contains error about shard count: " + e.getMessage(), e.getMessage().contains("index must have 0 or more replica shards"), equalTo(true)); } try { prepareCreate("test").setSettings(ImmutableSettings.builder() - .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(-10, 0)) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(-10, -1)) - .build()) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(-10, 0)) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(-10, -1)) + .build()) .get(); fail("should have thrown an exception about the shard count"); - } catch (ActionRequestValidationException e) { + } catch (IllegalArgumentException e) { assertThat("message contains error about shard count: " + e.getMessage(), e.getMessage().contains("index must have 1 or more primary shards"), equalTo(true)); assertThat("message contains error about shard count: " + e.getMessage(), e.getMessage().contains("index must have 0 or more replica shards"), equalTo(true)); } } + + @Test + public void testCreateIndexWithBlocks() { + try { + setClusterReadOnly(true); + assertBlocked(prepareCreate("test")); + } finally { + setClusterReadOnly(false); + } + } + + @Test + public void testInvalidShardCountSettingsWithoutPrefix() throws Exception { + try { + prepareCreate("test").setSettings(ImmutableSettings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, 0)) + .build()) + .get(); + fail("should have thrown an exception about the shard count"); + } catch (IllegalArgumentException e) { + assertThat("message contains error about shard count: " + e.getMessage(), + e.getMessage().contains("index must have 1 or more primary shards"), equalTo(true)); + } + try { + prepareCreate("test").setSettings(ImmutableSettings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, -1)) + .build()) + .get(); + fail("should have thrown an exception about the shard count"); + } catch (IllegalArgumentException e) { + assertThat("message contains error about shard count: " + e.getMessage(), + e.getMessage().contains("index must have 0 or more replica shards"), equalTo(true)); + } + try { + prepareCreate("test").setSettings(ImmutableSettings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, 0)) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, -1)) + .build()) + .get(); + fail("should have thrown an exception about the shard count"); + } catch (IllegalArgumentException e) { + assertThat("message contains error about shard count: " + e.getMessage(), + e.getMessage().contains("index must have 1 or more primary shards"), equalTo(true)); + assertThat("message contains error about shard count: " + e.getMessage(), + e.getMessage().contains("index must have 0 or more replica shards"), equalTo(true)); + } + } + } diff --git 
a/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksTests.java new file mode 100644 index 00000000000..6973f63a227 --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/indices/delete/DeleteIndexBlocksTests.java @@ -0,0 +1,43 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.delete; + +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class DeleteIndexBlocksTests extends ElasticsearchIntegrationTest{ + + @Test + public void testDeleteIndexWithBlocks() { + createIndex("test"); + ensureGreen("test"); + + try { + setClusterReadOnly(true); + assertBlocked(client().admin().indices().prepareDelete("test")); + } finally { + setClusterReadOnly(false); + } + } +} diff --git a/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksTests.java new file mode 100644 index 00000000000..b9fa6bcd8b5 --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/indices/flush/FlushBlocksTests.java @@ -0,0 +1,82 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.flush; + +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.equalTo; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class FlushBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testFlushWithBlocks() { + createIndex("test"); + ensureGreen("test"); + + NumShards numShards = getNumShards("test"); + + int docs = between(10, 100); + for (int i = 0; i < docs; i++) { + client().prepareIndex("test", "type", "" + i).setSource("test", "init").execute().actionGet(); + } + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + enableIndexBlock("test", blockSetting); + FlushResponse response = client().admin().indices().prepareFlush("test").execute().actionGet(); + assertNoFailures(response); + assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + // Request is blocked + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test", blockSetting); + assertBlocked(client().admin().indices().prepareFlush("test")); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + // Flushing all indices is blocked when the cluster is read-only + try { + FlushResponse response = client().admin().indices().prepareFlush().execute().actionGet(); + assertNoFailures(response); + assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); + + setClusterReadOnly(true); + assertBlocked(client().admin().indices().prepareFlush()); + } finally { + setClusterReadOnly(false); + } + } +} \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexTests.java b/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexTests.java index f335d40e79b..2b4f4bcab6b 100644 --- a/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexTests.java +++ b/src/test/java/org/elasticsearch/action/admin/indices/get/GetIndexTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.action.admin.indices.get; import com.google.common.collect.ImmutableList; - import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature; import org.elasticsearch.cluster.metadata.AliasMetaData; @@ -34,12 +33,13 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.anyOf; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.notNullValue; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.hamcrest.Matchers.*; @ElasticsearchIntegrationTest.SuiteScopeTest public class GetIndexTests extends 
ElasticsearchIntegrationTest { @@ -205,6 +205,32 @@ public class GetIndexTests extends ElasticsearchIntegrationTest { assertEmptyWarmers(response); } + @Test + public void testGetIndexWithBlocks() { + for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("idx", block); + GetIndexResponse response = client().admin().indices().prepareGetIndex().addIndices("idx") + .addFeatures(Feature.MAPPINGS, Feature.ALIASES).get(); + String[] indices = response.indices(); + assertThat(indices, notNullValue()); + assertThat(indices.length, equalTo(1)); + assertThat(indices[0], equalTo("idx")); + assertMappings(response, "idx"); + assertAliases(response, "idx"); + } finally { + disableIndexBlock("idx", block); + } + } + + try { + enableIndexBlock("idx", SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().indices().prepareGetIndex().addIndices("idx").addFeatures(Feature.MAPPINGS, Feature.ALIASES), INDEX_METADATA_BLOCK); + } finally { + disableIndexBlock("idx", SETTING_BLOCKS_METADATA); + } + } + private GetIndexResponse runWithRandomFeatureMethod(GetIndexRequestBuilder requestBuilder, Feature... features) { if (randomBoolean()) { return requestBuilder.addFeatures(features).get(); diff --git a/src/test/java/org/elasticsearch/action/admin/indices/optimize/OptimizeBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/indices/optimize/OptimizeBlocksTests.java new file mode 100644 index 00000000000..47b5fbe0da8 --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/indices/optimize/OptimizeBlocksTests.java @@ -0,0 +1,82 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.optimize; + +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.equalTo; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class OptimizeBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testOptimizeWithBlocks() { + createIndex("test"); + ensureGreen("test"); + + NumShards numShards = getNumShards("test"); + + int docs = between(10, 100); + for (int i = 0; i < docs; i++) { + client().prepareIndex("test", "type", "" + i).setSource("test", "init").execute().actionGet(); + } + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + enableIndexBlock("test", blockSetting); + OptimizeResponse response = client().admin().indices().prepareOptimize("test").execute().actionGet(); + assertNoFailures(response); + assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + // Request is blocked + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test", blockSetting); + assertBlocked(client().admin().indices().prepareOptimize("test")); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + // Optimizing all indices is blocked when the cluster is read-only + try { + OptimizeResponse response = client().admin().indices().prepareOptimize().execute().actionGet(); + assertNoFailures(response); + assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); + + setClusterReadOnly(true); + assertBlocked(client().admin().indices().prepareOptimize()); + } finally { + setClusterReadOnly(false); + } + } +} \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksTests.java new file mode 100644 index 00000000000..fc83f96eb3c --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/indices/refresh/RefreshBlocksTests.java @@ -0,0 +1,78 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package org.elasticsearch.action.admin.indices.refresh; + + +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.equalTo; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class RefreshBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testRefreshWithBlocks() { + createIndex("test"); + ensureGreen("test"); + + NumShards numShards = getNumShards("test"); + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + enableIndexBlock("test", blockSetting); + RefreshResponse response = client().admin().indices().prepareRefresh("test").execute().actionGet(); + assertNoFailures(response); + assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + // Request is blocked + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test", blockSetting); + assertBlocked(client().admin().indices().prepareRefresh("test")); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + // Refreshing all indices is blocked when the cluster is read-only + try { + RefreshResponse response = client().admin().indices().prepareRefresh().execute().actionGet(); + assertNoFailures(response); + assertThat(response.getSuccessfulShards(), equalTo(numShards.totalNumShards)); + + setClusterReadOnly(true); + assertBlocked(client().admin().indices().prepareRefresh()); + } finally { + setClusterReadOnly(false); + } + } +} diff --git a/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksTests.java new file mode 100644 index 00000000000..a3f2f9f1044 --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsBlocksTests.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.segments; + +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class IndicesSegmentsBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testIndicesSegmentsWithBlocks() { + createIndex("test-blocks"); + ensureGreen("test-blocks"); + + int docs = between(10, 100); + for (int i = 0; i < docs; i++) { + client().prepareIndex("test-blocks", "type", "" + i).setSource("test", "init").execute().actionGet(); + } + client().admin().indices().prepareFlush("test-blocks").get(); + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("test-blocks", blockSetting); + IndicesSegmentResponse response = client().admin().indices().prepareSegments("test-blocks").execute().actionGet(); + assertNoFailures(response); + } finally { + disableIndexBlock("test-blocks", blockSetting); + } + } + + // Request is blocked + try { + enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().indices().prepareSegments("test-blocks")); + } finally { + disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); + } + } +} diff --git a/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksTests.java b/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksTests.java new file mode 100644 index 00000000000..d6dba10e696 --- /dev/null +++ b/src/test/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsBlocksTests.java @@ -0,0 +1,62 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.stats; + +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class IndicesStatsBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testIndicesStatsWithBlocks() { + createIndex("ro"); + ensureGreen("ro"); + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("ro", blockSetting); + IndicesStatsResponse indicesStatsResponse = client().admin().indices().prepareStats("ro").execute().actionGet(); + assertNotNull(indicesStatsResponse.getIndex("ro")); + } finally { + disableIndexBlock("ro", blockSetting); + } + } + + // Request is blocked + try { + enableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA); + client().admin().indices().prepareStats("ro").execute().actionGet(); + fail("Stats request should fail when " + IndexMetaData.SETTING_BLOCKS_METADATA + " is true"); + } catch (ClusterBlockException e) { + // Ok, a ClusterBlockException is expected + } finally { + disableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA); + } + } +} diff --git a/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsTests.java b/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsTests.java index 6b9379fe052..5140df378dd 100644 --- a/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsTests.java +++ b/src/test/java/org/elasticsearch/action/bulk/BulkProcessorClusterSettingsTests.java @@ -48,7 +48,7 @@ public class BulkProcessorClusterSettingsTests extends ElasticsearchIntegrationT assertEquals(3, responses.length); assertFalse("Operation on existing index should succeed", responses[0].isFailed()); assertTrue("Missing index should have been flagged", responses[1].isFailed()); - assertEquals("IndexMissingException[[wontwork] missing]", responses[1].getFailureMessage()); + assertEquals("[wontwork] no such index", responses[1].getFailureMessage()); assertFalse("Operation on existing index should succeed", responses[2].isFailed()); } } diff --git a/src/test/java/org/elasticsearch/action/count/CountRequestBuilderTests.java b/src/test/java/org/elasticsearch/action/count/CountRequestBuilderTests.java new file mode 100644 index 00000000000..22387d277e8 --- /dev/null +++ b/src/test/java/org/elasticsearch/action/count/CountRequestBuilderTests.java @@ -0,0 +1,128 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied.
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.count; + +import org.elasticsearch.action.support.QuerySourceBuilder; +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; + +public class CountRequestBuilderTests extends ElasticsearchTestCase { + + private static Client client; + + @BeforeClass + public static void initClient() { + //this client will not be hit by any request, but it needs to be a non null proper client + //that is why we create it but we don't add any transport address to it + client = new TransportClient(); + } + + @AfterClass + public static void closeClient() { + client.close(); + client = null; + } + + @Test + public void testEmptySourceToString() { + CountRequestBuilder countRequestBuilder = new CountRequestBuilder(client); + assertThat(countRequestBuilder.toString(), equalTo(new QuerySourceBuilder().toString())); + } + + @Test + public void testQueryBuilderQueryToString() { + CountRequestBuilder countRequestBuilder = new CountRequestBuilder(client); + countRequestBuilder.setQuery(QueryBuilders.matchAllQuery()); + assertThat(countRequestBuilder.toString(), equalTo(new QuerySourceBuilder().setQuery(QueryBuilders.matchAllQuery()).toString())); + } + + @Test + public void testStringQueryToString() { + CountRequestBuilder countRequestBuilder = new CountRequestBuilder(client); + String query = "{ \"match_all\" : {} }"; + countRequestBuilder.setQuery(new BytesArray(query)); + assertThat(countRequestBuilder.toString(), containsString("\"query\":{ \"match_all\" : {} }")); + } + + @Test + public void testXContentBuilderQueryToString() throws IOException { + CountRequestBuilder countRequestBuilder = new CountRequestBuilder(client); + XContentBuilder xContentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + xContentBuilder.startObject(); + xContentBuilder.startObject("match_all"); + xContentBuilder.endObject(); + xContentBuilder.endObject(); + countRequestBuilder.setQuery(xContentBuilder); + assertThat(countRequestBuilder.toString(), equalTo(new QuerySourceBuilder().setQuery(xContentBuilder.bytes()).toString())); + } + + @Test + public void testStringSourceToString() { + CountRequestBuilder countRequestBuilder = new CountRequestBuilder(client); + String query = "{ \"query\": { \"match_all\" : {} } }"; + countRequestBuilder.setSource(new BytesArray(query)); + assertThat(countRequestBuilder.toString(), equalTo("{ \"query\": { \"match_all\" : {} } }")); + } + + @Test + public void testXContentBuilderSourceToString() throws IOException { + CountRequestBuilder countRequestBuilder = new CountRequestBuilder(client); + XContentBuilder xContentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + xContentBuilder.startObject(); + xContentBuilder.startObject("match_all"); + 
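// the two endObject() calls below close the inner "match_all" object and the outer wrapper, so the builder bytes are {"match_all":{}} +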
xContentBuilder.endObject(); + xContentBuilder.endObject(); + countRequestBuilder.setSource(xContentBuilder.bytes()); + assertThat(countRequestBuilder.toString(), equalTo(XContentHelper.convertToJson(xContentBuilder.bytes(), false, true))); + } + + @Test + public void testThatToStringDoesntWipeSource() { + String source = "{\n" + + " \"query\" : {\n" + + " \"match\" : {\n" + + " \"field\" : {\n" + + " \"query\" : \"value\"" + + " }\n" + + " }\n" + + " }\n" + + " }"; + CountRequestBuilder countRequestBuilder = new CountRequestBuilder(client).setSource(new BytesArray(source)); + String preToString = countRequestBuilder.request().source().toUtf8(); + assertThat(countRequestBuilder.toString(), equalTo(source)); + String postToString = countRequestBuilder.request().source().toUtf8(); + assertThat(preToString, equalTo(postToString)); + } +} diff --git a/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java index e09253ff290..a8aead541c7 100644 --- a/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java +++ b/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.action.index; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; import static org.hamcrest.Matchers.equalTo; @@ -40,7 +39,7 @@ public class IndexRequestTests extends ElasticsearchTestCase { assertThat(IndexRequest.OpType.fromString(indexUpper), equalTo(IndexRequest.OpType.INDEX)); } - @Test(expected= ElasticsearchIllegalArgumentException.class) + @Test(expected= IllegalArgumentException.class) public void testReadBogusString(){ String foobar = "foobar"; IndexRequest.OpType.fromString(foobar); diff --git a/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java b/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java index 26af18c90d0..50f915bfe67 100644 --- a/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java +++ b/src/test/java/org/elasticsearch/action/percolate/MultiPercolatorRequestTests.java @@ -38,7 +38,7 @@ public class MultiPercolatorRequestTests extends ElasticsearchTestCase { byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/percolate/mpercolate1.json"); MultiPercolateRequest request = new MultiPercolateRequest().add(data, 0, data.length); - assertThat(request.requests().size(), equalTo(6)); + assertThat(request.requests().size(), equalTo(8)); PercolateRequest percolateRequest = request.requests().get(0); assertThat(percolateRequest.indices()[0], equalTo("my-index1")); assertThat(percolateRequest.documentType(), equalTo("my-type1")); @@ -61,8 +61,8 @@ public class MultiPercolatorRequestTests extends ElasticsearchTestCase { assertThat(percolateRequest.onlyCount(), equalTo(false)); assertThat(percolateRequest.getRequest(), nullValue()); assertThat(percolateRequest.source(), notNullValue()); - sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value2").map())); + sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); + assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value2").map())); percolateRequest = request.requests().get(2); 
assertThat(percolateRequest.indices()[0], equalTo("my-index4")); @@ -74,8 +74,8 @@ public class MultiPercolatorRequestTests extends ElasticsearchTestCase { assertThat(percolateRequest.onlyCount(), equalTo(true)); assertThat(percolateRequest.getRequest(), nullValue()); assertThat(percolateRequest.source(), notNullValue()); - sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value3").map())); + sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); + assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value3").map())); percolateRequest = request.requests().get(3); assertThat(percolateRequest.indices()[0], equalTo("my-index6")); @@ -114,8 +114,40 @@ public class MultiPercolatorRequestTests extends ElasticsearchTestCase { assertThat(percolateRequest.onlyCount(), equalTo(false)); assertThat(percolateRequest.getRequest(), nullValue()); assertThat(percolateRequest.source(), notNullValue()); - sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value4").map())); + sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); + assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value4").map())); + + percolateRequest = request.requests().get(6); + assertThat(percolateRequest.indices()[0], equalTo("percolate-index1")); + assertThat(percolateRequest.documentType(), equalTo("other-type")); + assertThat(percolateRequest.routing(), equalTo("percolate-routing-1")); + assertThat(percolateRequest.preference(), equalTo("_local")); + assertThat(percolateRequest.getRequest(), notNullValue()); + assertThat(percolateRequest.getRequest().indices()[0], equalTo("my-index9")); + assertThat(percolateRequest.getRequest().type(), equalTo("my-type1")); + assertThat(percolateRequest.getRequest().routing(), nullValue()); + assertThat(percolateRequest.getRequest().preference(), nullValue()); + assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.strictExpandOpenAndForbidClosed())); + assertThat(percolateRequest.onlyCount(), equalTo(false)); + assertThat(percolateRequest.source(), notNullValue()); + sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); + assertThat(sourceMap.get("doc"), nullValue()); + + percolateRequest = request.requests().get(7); + assertThat(percolateRequest.indices()[0], equalTo("my-index10")); + assertThat(percolateRequest.documentType(), equalTo("my-type1")); + assertThat(percolateRequest.routing(), nullValue()); + assertThat(percolateRequest.preference(), nullValue()); + assertThat(percolateRequest.getRequest(), notNullValue()); + assertThat(percolateRequest.getRequest().indices()[0], equalTo("my-index10")); + assertThat(percolateRequest.getRequest().type(), equalTo("my-type1")); + assertThat(percolateRequest.getRequest().routing(), nullValue()); + assertThat(percolateRequest.getRequest().preference(), nullValue()); + assertThat(percolateRequest.indicesOptions(), equalTo(IndicesOptions.fromOptions(false, false, true, false, IndicesOptions.strictExpandOpenAndForbidClosed()))); + assertThat(percolateRequest.onlyCount(), equalTo(false)); + 
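// per the added lines in mpercolate1.json, this request's body is an empty object, so the parsed source carries no "doc" entry +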
assertThat(percolateRequest.source(), notNullValue()); + sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); + assertThat(sourceMap.get("doc"), nullValue()); } @Test @@ -147,8 +179,8 @@ public class MultiPercolatorRequestTests extends ElasticsearchTestCase { assertThat(percolateRequest.onlyCount(), equalTo(false)); assertThat(percolateRequest.getRequest(), nullValue()); assertThat(percolateRequest.source(), notNullValue()); - sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value2").map())); + sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); + assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value2").map())); percolateRequest = request.requests().get(2); assertThat(percolateRequest.indices()[0], equalTo("my-index1")); @@ -157,8 +189,8 @@ public class MultiPercolatorRequestTests extends ElasticsearchTestCase { assertThat(percolateRequest.onlyCount(), equalTo(false)); assertThat(percolateRequest.getRequest(), nullValue()); assertThat(percolateRequest.source(), notNullValue()); - sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); - assertThat(sourceMap.get("doc"), equalTo((Object)MapBuilder.newMapBuilder().put("field1", "value3").map())); + sourceMap = XContentFactory.xContent(percolateRequest.source()).createParser(percolateRequest.source()).map(); + assertThat(sourceMap.get("doc"), equalTo((Object) MapBuilder.newMapBuilder().put("field1", "value3").map())); } } diff --git a/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json b/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json index ceb4acae44e..44079390bfc 100644 --- a/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json +++ b/src/test/java/org/elasticsearch/action/percolate/mpercolate1.json @@ -10,3 +10,7 @@ {} {"percolate" : {"index" : "my-index8", "type" : "my-type1", "routing" : "my-routing-1", "preference" : "primary"}} {"doc" : {"field1" : "value4"}} +{"percolate" : {"id" : "3", "index" : "my-index9", "type" : "my-type1", "percolate_index": "percolate-index1", "percolate_type": "other-type", "percolate_preference": "_local", "percolate_routing": "percolate-routing-1"}} +{} +{"percolate" : {"id" : "4", "index" : "my-index10", "type" : "my-type1", "allow_no_indices": false, "expand_wildcards" : ["open"]}} +{} diff --git a/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java b/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java index d4d82ede283..ee520760b53 100644 --- a/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java +++ b/src/test/java/org/elasticsearch/action/search/MultiSearchRequestTests.java @@ -35,20 +35,26 @@ public class MultiSearchRequestTests extends ElasticsearchTestCase { public void simpleAdd() throws Exception { byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch1.json"); MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null); - assertThat(request.requests().size(), equalTo(5)); + assertThat(request.requests().size(), equalTo(8)); assertThat(request.requests().get(0).indices()[0], equalTo("test")); assertThat(request.requests().get(0).indicesOptions(), 
equalTo(IndicesOptions.fromOptions(true, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); assertThat(request.requests().get(0).types().length, equalTo(0)); assertThat(request.requests().get(1).indices()[0], equalTo("test")); assertThat(request.requests().get(1).indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); assertThat(request.requests().get(1).types()[0], equalTo("type1")); - assertThat(request.requests().get(2).indices(), nullValue()); - assertThat(request.requests().get(2).types().length, equalTo(0)); - assertThat(request.requests().get(3).indices(), nullValue()); - assertThat(request.requests().get(3).types().length, equalTo(0)); - assertThat(request.requests().get(3).searchType(), equalTo(SearchType.DFS_QUERY_THEN_FETCH)); - assertThat(request.requests().get(4).indices(), nullValue()); - assertThat(request.requests().get(4).types().length, equalTo(0)); + assertThat(request.requests().get(2).indices()[0], equalTo("test")); + assertThat(request.requests().get(2).indicesOptions(), equalTo(IndicesOptions.fromOptions(false, true, true, false, IndicesOptions.strictExpandOpenAndForbidClosed()))); + assertThat(request.requests().get(3).indices()[0], equalTo("test")); + assertThat(request.requests().get(3).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, true, true, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); + assertThat(request.requests().get(4).indices()[0], equalTo("test")); + assertThat(request.requests().get(4).indicesOptions(), equalTo(IndicesOptions.fromOptions(true, false, false, true, IndicesOptions.strictExpandOpenAndForbidClosed()))); + assertThat(request.requests().get(5).indices(), nullValue()); + assertThat(request.requests().get(5).types().length, equalTo(0)); + assertThat(request.requests().get(6).indices(), nullValue()); + assertThat(request.requests().get(6).types().length, equalTo(0)); + assertThat(request.requests().get(6).searchType(), equalTo(SearchType.DFS_QUERY_THEN_FETCH)); + assertThat(request.requests().get(7).indices(), nullValue()); + assertThat(request.requests().get(7).types().length, equalTo(0)); } @Test @@ -87,4 +93,25 @@ public class MultiSearchRequestTests extends ElasticsearchTestCase { assertThat(request.requests().get(3).types().length, equalTo(0)); assertThat(request.requests().get(3).searchType(), equalTo(SearchType.DFS_QUERY_THEN_FETCH)); } + + @Test + public void simpleAdd4() throws Exception { + byte[] data = Streams.copyToBytesFromClasspath("/org/elasticsearch/action/search/simple-msearch4.json"); + MultiSearchRequest request = new MultiSearchRequest().add(data, 0, data.length, null, null, null); + assertThat(request.requests().size(), equalTo(3)); + assertThat(request.requests().get(0).indices()[0], equalTo("test0")); + assertThat(request.requests().get(0).indices()[1], equalTo("test1")); + assertThat(request.requests().get(0).queryCache(), equalTo(true)); + assertThat(request.requests().get(0).preference(), nullValue()); + assertThat(request.requests().get(1).indices()[0], equalTo("test2")); + assertThat(request.requests().get(1).indices()[1], equalTo("test3")); + assertThat(request.requests().get(1).types()[0], equalTo("type1")); + assertThat(request.requests().get(1).queryCache(), nullValue()); + assertThat(request.requests().get(1).preference(), equalTo("_local")); + assertThat(request.requests().get(2).indices()[0], equalTo("test4")); + assertThat(request.requests().get(2).indices()[1], equalTo("test1")); + 
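// the third header in simple-msearch4.json supplies index and type as JSON arrays and an explicit routing value +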
assertThat(request.requests().get(2).types()[0], equalTo("type2")); + assertThat(request.requests().get(2).types()[1], equalTo("type1")); + assertThat(request.requests().get(2).routing(), equalTo("123")); + } } diff --git a/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java b/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java new file mode 100644 index 00000000000..57a48bbbcc0 --- /dev/null +++ b/src/test/java/org/elasticsearch/action/search/SearchRequestBuilderTests.java @@ -0,0 +1,129 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.search; + +import org.elasticsearch.client.Client; +import org.elasticsearch.client.transport.TransportClient; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.IOException; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; + +public class SearchRequestBuilderTests extends ElasticsearchTestCase { + + private static Client client; + + @BeforeClass + public static void initClient() { + //this client will not be hit by any request, but it needs to be a non null proper client + //that is why we create it but we don't add any transport address to it + client = new TransportClient(); + } + + @AfterClass + public static void closeClient() { + client.close(); + client = null; + } + + @Test + public void testEmptySourceToString() { + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client); + assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder().toString())); + } + + @Test + public void testQueryBuilderQueryToString() { + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client); + searchRequestBuilder.setQuery(QueryBuilders.matchAllQuery()); + assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder().query(QueryBuilders.matchAllQuery()).toString())); + } + + @Test + public void testXContentBuilderQueryToString() throws IOException { + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client); + XContentBuilder xContentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + xContentBuilder.startObject(); + xContentBuilder.startObject("match_all"); + xContentBuilder.endObject(); + xContentBuilder.endObject(); + 
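// at this point the builder holds {"match_all":{}}; toString() should render the same source as a SearchSourceBuilder fed the same query +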
searchRequestBuilder.setQuery(xContentBuilder); + assertThat(searchRequestBuilder.toString(), equalTo(new SearchSourceBuilder().query(xContentBuilder).toString())); + } + + @Test + public void testStringQueryToString() { + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client); + String query = "{ \"match_all\" : {} }"; + searchRequestBuilder.setQuery(query); + assertThat(searchRequestBuilder.toString(), containsString("\"query\":{ \"match_all\" : {} }")); + } + + @Test + public void testStringSourceToString() { + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client); + String source = "{ \"query\" : { \"match_all\" : {} } }"; + searchRequestBuilder.setSource(source); + assertThat(searchRequestBuilder.toString(), equalTo(source)); + } + + @Test + public void testXContentBuilderSourceToString() throws IOException { + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client); + XContentBuilder xContentBuilder = XContentFactory.contentBuilder(randomFrom(XContentType.values())); + xContentBuilder.startObject(); + xContentBuilder.startObject("query"); + xContentBuilder.startObject("match_all"); + xContentBuilder.endObject(); + xContentBuilder.endObject(); + xContentBuilder.endObject(); + searchRequestBuilder.setSource(xContentBuilder); + assertThat(searchRequestBuilder.toString(), equalTo(XContentHelper.convertToJson(xContentBuilder.bytes(), false, true))); + } + + @Test + public void testThatToStringDoesntWipeRequestSource() { + String source = "{\n" + + " \"query\" : {\n" + + " \"match\" : {\n" + + " \"field\" : {\n" + + " \"query\" : \"value\"" + + " }\n" + + " }\n" + + " }\n" + + " }"; + SearchRequestBuilder searchRequestBuilder = new SearchRequestBuilder(client).setSource(source); + String preToString = searchRequestBuilder.request().source().toUtf8(); + assertThat(searchRequestBuilder.toString(), equalTo(source)); + String postToString = searchRequestBuilder.request().source().toUtf8(); + assertThat(preToString, equalTo(postToString)); + } +} diff --git a/src/test/java/org/elasticsearch/action/search/simple-msearch1.json b/src/test/java/org/elasticsearch/action/search/simple-msearch1.json index 6d31863fa3c..3d98f375153 100644 --- a/src/test/java/org/elasticsearch/action/search/simple-msearch1.json +++ b/src/test/java/org/elasticsearch/action/search/simple-msearch1.json @@ -2,6 +2,12 @@ {"query" : {"match_all" {}}} {"index" : "test", "type" : "type1", "expand_wildcards" : ["open", "closed"]} {"query" : {"match_all" {}}} +{"index":"test", "ignore_unavailable" : false, "expand_wildcards" : ["open"]}} +{"query" : {"match_all" {}}} +{"index":"test", "ignore_unavailable" : true, "allow_no_indices": true, "expand_wildcards" : ["open", "closed"]}} +{"query" : {"match_all" {}}} +{"index":"test", "ignore_unavailable" : true, "allow_no_indices": false, "expand_wildcards" : ["closed"]}} +{"query" : {"match_all" {}}} {} {"query" : {"match_all" {}}} {"search_type" : "dfs_query_then_fetch"} diff --git a/src/test/java/org/elasticsearch/action/search/simple-msearch4.json b/src/test/java/org/elasticsearch/action/search/simple-msearch4.json new file mode 100644 index 00000000000..ab6b8206b01 --- /dev/null +++ b/src/test/java/org/elasticsearch/action/search/simple-msearch4.json @@ -0,0 +1,6 @@ +{"index":["test0", "test1"], "query_cache": true} +{"query" : {"match_all" {}}} +{"index" : "test2,test3", "type" : "type1", "preference": "_local"} +{"query" : {"match_all" {}}} +{"index" : ["test4", "test1"], "type" : [ "type2", "type1" ], "routing": 
"123"} +{"query" : {"match_all" {}}} diff --git a/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationOperationTests.java b/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationOperationTests.java new file mode 100644 index 00000000000..a28c78ccd34 --- /dev/null +++ b/src/test/java/org/elasticsearch/action/support/replication/ShardReplicationOperationTests.java @@ -0,0 +1,571 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.action.support.replication; + +import org.apache.lucene.index.CorruptIndexException; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionWriteResponse; +import org.elasticsearch.action.UnavailableShardsException; +import org.elasticsearch.action.WriteConsistencyLevel; +import org.elasticsearch.action.support.ActionFilter; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterService; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.action.shard.ShardStateAction; +import org.elasticsearch.cluster.block.ClusterBlock; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.index.shard.IndexShardNotStartedException; +import org.elasticsearch.index.shard.IndexShardState; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.elasticsearch.test.cluster.TestClusterService; +import org.elasticsearch.test.transport.CapturingTransport; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportResponse; +import org.elasticsearch.transport.TransportService; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +import 
java.io.IOException; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.hamcrest.Matchers.*; + +public class ShardReplicationOperationTests extends ElasticsearchTestCase { + + private static ThreadPool threadPool; + + private TestClusterService clusterService; + private TransportService transportService; + private CapturingTransport transport; + private Action action; + + + @BeforeClass + public static void beforeClass() { + threadPool = new ThreadPool("ShardReplicationOperationTests"); + } + + @Before + public void setUp() throws Exception { + super.setUp(); + transport = new CapturingTransport(); + clusterService = new TestClusterService(threadPool); + transportService = new TransportService(transport, threadPool); + transportService.start(); + action = new Action(ImmutableSettings.EMPTY, "testAction", transportService, clusterService, threadPool); + } + + @AfterClass + public static void afterClass() { + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + threadPool = null; + } + + + void assertListenerThrows(String msg, PlainActionFuture listener, Class klass) throws InterruptedException { + try { + listener.get(); + fail(msg); + } catch (ExecutionException ex) { + assertThat(ex.getCause(), instanceOf(klass)); + } + + } + + @Test + public void testBlocks() throws ExecutionException, InterruptedException { + Request request = new Request(); + PlainActionFuture listener = new PlainActionFuture<>(); + + ClusterBlocks.Builder block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + TransportShardReplicationOperationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener); + assertFalse("primary phase should stop execution", primaryPhase.checkBlocks()); + assertListenerThrows("primary phase should fail operation", listener, ClusterBlockException.class); + + block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "retryable", true, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + listener = new PlainActionFuture<>(); + primaryPhase = action.new PrimaryPhase(new Request().timeout("5ms"), listener); + assertFalse("primary phase should stop execution on retryable block", primaryPhase.checkBlocks()); + assertListenerThrows("failed to timeout on retryable block", listener, ClusterBlockException.class); + + + listener = new PlainActionFuture<>(); + primaryPhase = action.new PrimaryPhase(new Request(), listener); + assertFalse("primary phase should stop execution on retryable block", primaryPhase.checkBlocks()); + assertFalse("primary phase should wait on retryable block", listener.isDone()); + + block = ClusterBlocks.builder() + .addGlobalBlock(new ClusterBlock(1, "non retryable", false, true, RestStatus.SERVICE_UNAVAILABLE, ClusterBlockLevel.ALL)); + clusterService.setState(ClusterState.builder(clusterService.state()).blocks(block)); + assertListenerThrows("primary phase should fail operation when moving from a retryable block to a non-retryable one", listener,
ClusterBlockException.class); + } + + ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int numberOfReplicas) { + int assignedReplicas = randomIntBetween(0, numberOfReplicas); + return stateWithStartedPrimary(index, primaryLocal, assignedReplicas, numberOfReplicas - assignedReplicas); + } + + ClusterState stateWithStartedPrimary(String index, boolean primaryLocal, int assignedReplicas, int unassignedReplicas) { + ShardRoutingState[] replicaStates = new ShardRoutingState[assignedReplicas + unassignedReplicas]; + // no point in randomizing - node assignment later on does it too. + for (int i = 0; i < assignedReplicas; i++) { + replicaStates[i] = randomFrom(ShardRoutingState.INITIALIZING, ShardRoutingState.STARTED, ShardRoutingState.RELOCATING); + } + for (int i = assignedReplicas; i < replicaStates.length; i++) { + replicaStates[i] = ShardRoutingState.UNASSIGNED; + } + return state(index, primaryLocal, randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING), replicaStates); + + } + + ClusterState state(String index, boolean primaryLocal, ShardRoutingState primaryState, ShardRoutingState... replicaStates) { + final int numberOfReplicas = replicaStates.length; + + int numberOfNodes = numberOfReplicas + 1; + if (primaryState == ShardRoutingState.RELOCATING) { + numberOfNodes++; + } + for (ShardRoutingState state : replicaStates) { + if (state == ShardRoutingState.RELOCATING) { + numberOfNodes++; + } + } + numberOfNodes = Math.max(2, numberOfNodes); // we need a non-local master to test shard failures + final ShardId shardId = new ShardId(index, 0); + DiscoveryNodes.Builder discoBuilder = DiscoveryNodes.builder(); + Set unassignedNodes = new HashSet<>(); + for (int i = 0; i < numberOfNodes + 1; i++) { + final DiscoveryNode node = newNode(i); + discoBuilder = discoBuilder.put(node); + unassignedNodes.add(node.id()); + } + discoBuilder.localNodeId(newNode(0).id()); + discoBuilder.masterNodeId(newNode(1).id()); // we need a non-local master to test shard failures + IndexMetaData indexMetaData = IndexMetaData.builder(index).settings(ImmutableSettings.builder() + .put(SETTING_VERSION_CREATED, Version.CURRENT) + .put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, numberOfReplicas) + .put(SETTING_CREATION_DATE, System.currentTimeMillis())).build(); + + RoutingTable.Builder routing = new RoutingTable.Builder(); + routing.addAsNew(indexMetaData); + IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId, false); + + String primaryNode = null; + String relocatingNode = null; + if (primaryState != ShardRoutingState.UNASSIGNED) { + if (primaryLocal) { + primaryNode = newNode(0).id(); + unassignedNodes.remove(primaryNode); + } else { + primaryNode = selectAndRemove(unassignedNodes); + } + if (primaryState == ShardRoutingState.RELOCATING) { + relocatingNode = selectAndRemove(unassignedNodes); + } + } + indexShardRoutingBuilder.addShard(new ImmutableShardRouting(index, 0, primaryNode, relocatingNode, true, primaryState, 0)); + + for (ShardRoutingState replicaState : replicaStates) { + String replicaNode = null; + relocatingNode = null; + if (replicaState != ShardRoutingState.UNASSIGNED) { + assert primaryNode != null : "a replica is assigned but the primary isn't"; + replicaNode = selectAndRemove(unassignedNodes); + if (replicaState == ShardRoutingState.RELOCATING) { + relocatingNode = selectAndRemove(unassignedNodes); + } + } + indexShardRoutingBuilder.addShard( + new ImmutableShardRouting(index, shardId.id(), 
replicaNode, relocatingNode, false, replicaState, 0)); + + } + + ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); + state.nodes(discoBuilder); + state.metaData(MetaData.builder().put(indexMetaData, false).generateUuidIfNeeded()); + state.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index).addIndexShard(indexShardRoutingBuilder.build()))); + return state.build(); + } + + private String selectAndRemove(Set strings) { + String selection = randomFrom(strings.toArray(new String[strings.size()])); + strings.remove(selection); + return selection; + } + + @Test + public void testNotStartedPrimary() throws InterruptedException, ExecutionException { + final String index = "test"; + final ShardId shardId = new ShardId(index, 0); + // no replicas in order to skip the replication part + clusterService.setState(state(index, true, + randomBoolean() ? ShardRoutingState.INITIALIZING : ShardRoutingState.UNASSIGNED)); + + logger.debug("--> using initial state:\n{}", clusterService.state().prettyPrint()); + + Request request = new Request(shardId).timeout("1ms"); + PlainActionFuture listener = new PlainActionFuture<>(); + TransportShardReplicationOperationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener); + primaryPhase.run(); + assertListenerThrows("unassigned primary didn't cause a timeout", listener, UnavailableShardsException.class); + + request = new Request(shardId); + listener = new PlainActionFuture<>(); + primaryPhase = action.new PrimaryPhase(request, listener); + primaryPhase.run(); + assertFalse("unassigned primary didn't cause a retry", listener.isDone()); + + clusterService.setState(state(index, true, ShardRoutingState.STARTED)); + logger.debug("--> primary assigned state:\n{}", clusterService.state().prettyPrint()); + + listener.get(); + assertTrue("request wasn't processed on primary, despite it being assigned", request.processedOnPrimary.get()); + } + + @Test + public void testRoutingToPrimary() { + final String index = "test"; + final ShardId shardId = new ShardId(index, 0); + + clusterService.setState(stateWithStartedPrimary(index, randomBoolean(), 3)); + + logger.debug("using state: \n{}", clusterService.state().prettyPrint()); + + final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); + final String primaryNodeId = shardRoutingTable.primaryShard().currentNodeId(); + Request request = new Request(shardId); + PlainActionFuture listener = new PlainActionFuture<>(); + + TransportShardReplicationOperationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener); + assertTrue(primaryPhase.checkBlocks()); + primaryPhase.routeRequestOrPerformLocally(shardRoutingTable.primaryShard(), shardRoutingTable.shardsIt()); + if (primaryNodeId.equals(clusterService.localNode().id())) { + logger.info("--> primary is assigned locally, testing for execution"); + assertTrue("request failed to be processed on a local primary", request.processedOnPrimary.get()); + } else { + logger.info("--> primary is assigned to [{}], checking request forwarded", primaryNodeId); + final List capturedRequests = transport.capturedRequestsByTargetNode().get(primaryNodeId); + assertThat(capturedRequests, notNullValue()); + assertThat(capturedRequests.size(), equalTo(1)); + assertThat(capturedRequests.get(0).action, equalTo("testAction")); + } + } + + @Test + public void testWriteConsistency() { + action = new ActionWithConsistency(ImmutableSettings.EMPTY,
"testActionWithConsistency", transportService, clusterService, threadPool); + final String index = "test"; + final ShardId shardId = new ShardId(index, 0); + final int assignedReplicas = randomInt(2); + final int unassignedReplicas = randomInt(2); + final int totalShards = 1 + assignedReplicas + unassignedReplicas; + final boolean passesWriteConsistency; + Request request = new Request(shardId).consistencyLevel(randomFrom(WriteConsistencyLevel.values())); + switch (request.consistencyLevel()) { + case ONE: + passesWriteConsistency = true; + break; + case DEFAULT: + case QUORUM: + if (totalShards <= 2) { + passesWriteConsistency = true; // primary is enough + } else { + passesWriteConsistency = assignedReplicas + 1 >= (totalShards / 2) + 1; + } + break; + case ALL: + passesWriteConsistency = unassignedReplicas == 0; + break; + default: + throw new RuntimeException("unknown consistency level [" + request.consistencyLevel() + "]"); + } + ShardRoutingState[] replicaStates = new ShardRoutingState[assignedReplicas + unassignedReplicas]; + for (int i = 0; i < assignedReplicas; i++) { + replicaStates[i] = randomFrom(ShardRoutingState.STARTED, ShardRoutingState.RELOCATING); + } + for (int i = assignedReplicas; i < replicaStates.length; i++) { + replicaStates[i] = ShardRoutingState.UNASSIGNED; + } + + clusterService.setState(state(index, true, ShardRoutingState.STARTED, replicaStates)); + logger.debug("using consistency level of [{}], assigned shards [{}], total shards [{}]. expecting op to [{}]. using state: \n{}", + request.consistencyLevel(), 1 + assignedReplicas, 1 + assignedReplicas + unassignedReplicas, passesWriteConsistency ? "succeed" : "retry", + clusterService.state().prettyPrint()); + + final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); + PlainActionFuture listener = new PlainActionFuture<>(); + + TransportShardReplicationOperationAction.PrimaryPhase primaryPhase = action.new PrimaryPhase(request, listener); + if (passesWriteConsistency) { + assertThat(primaryPhase.checkWriteConsistency(shardRoutingTable.primaryShard()), nullValue()); + primaryPhase.run(); + assertTrue("operations should have been perform, consistency level is met", request.processedOnPrimary.get()); + } else { + assertThat(primaryPhase.checkWriteConsistency(shardRoutingTable.primaryShard()), notNullValue()); + primaryPhase.run(); + assertFalse("operations should not have been perform, consistency level is *NOT* met", request.processedOnPrimary.get()); + for (int i = 0; i < replicaStates.length; i++) { + replicaStates[i] = ShardRoutingState.STARTED; + } + clusterService.setState(state(index, true, ShardRoutingState.STARTED, replicaStates)); + assertTrue("once the consistency level met, operation should continue", request.processedOnPrimary.get()); + } + + } + + @Test + public void testReplication() throws ExecutionException, InterruptedException { + final String index = "test"; + final ShardId shardId = new ShardId(index, 0); + + clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5))); + + final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id()); + int assignedReplicas = 0; + int totalShards = 0; + for (ShardRouting shard : shardRoutingTable) { + totalShards++; + if (shard.primary() == false && shard.assignedToNode()) { + assignedReplicas++; + } + if (shard.relocating()) { + assignedReplicas++; + totalShards++; + } + } + + runReplicateTest(shardRoutingTable, 
+
+    @Test
+    public void testReplication() throws ExecutionException, InterruptedException {
+        final String index = "test";
+        final ShardId shardId = new ShardId(index, 0);
+
+        clusterService.setState(stateWithStartedPrimary(index, true, randomInt(5)));
+
+        final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
+        int assignedReplicas = 0;
+        int totalShards = 0;
+        for (ShardRouting shard : shardRoutingTable) {
+            totalShards++;
+            if (shard.primary() == false && shard.assignedToNode()) {
+                assignedReplicas++;
+            }
+            if (shard.relocating()) {
+                assignedReplicas++;
+                totalShards++;
+            }
+        }
+
+        runReplicateTest(shardRoutingTable, assignedReplicas, totalShards);
+    }
+
+    @Test
+    public void testReplicationWithShadowIndex() throws ExecutionException, InterruptedException {
+        final String index = "test";
+        final ShardId shardId = new ShardId(index, 0);
+
+        ClusterState state = stateWithStartedPrimary(index, true, randomInt(5));
+        MetaData.Builder metaData = MetaData.builder(state.metaData());
+        ImmutableSettings.Builder settings = ImmutableSettings.builder().put(metaData.get(index).settings());
+        settings.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true);
+        metaData.put(IndexMetaData.builder(metaData.get(index)).settings(settings));
+        clusterService.setState(ClusterState.builder(state).metaData(metaData));
+
+        final IndexShardRoutingTable shardRoutingTable = clusterService.state().routingTable().index(index).shard(shardId.id());
+        int assignedReplicas = 0;
+        int totalShards = 0;
+        for (ShardRouting shard : shardRoutingTable) {
+            totalShards++;
+            if (shard.primary() && shard.relocating()) {
+                assignedReplicas++;
+                totalShards++;
+            }
+        }
+
+        runReplicateTest(shardRoutingTable, assignedReplicas, totalShards);
+    }
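testReplicationWithShadowIndex enables IndexMetaData.SETTING_SHADOW_REPLICAS on the index and then expects replication traffic only for a relocating primary: with shadow replicas the replica shards read the primary's files directly, so ordinary replica copies never receive the replicated operation. A hedged sketch of what opting into the setting could look like when building index settings (the wrapper class is illustrative; the setting constant is the one the test uses):

    import org.elasticsearch.cluster.metadata.IndexMetaData;
    import org.elasticsearch.common.settings.ImmutableSettings;
    import org.elasticsearch.common.settings.Settings;

    public class ShadowReplicasSettingsSketch {
        public static Settings build() {
            return ImmutableSettings.builder()
                    .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true) // replicas share the primary's files
                    .build();
        }
    }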
+
+
+    protected void runReplicateTest(IndexShardRoutingTable shardRoutingTable, int assignedReplicas, int totalShards) throws InterruptedException, ExecutionException {
+        final ShardRouting primaryShard = shardRoutingTable.primaryShard();
+        final ShardIterator shardIt = shardRoutingTable.shardsIt();
+        final ShardId shardId = shardIt.shardId();
+        final Request request = new Request();
+        PlainActionFuture<Response> listener = new PlainActionFuture<>();
+
+        logger.debug("expecting [{}] assigned replicas, [{}] total shards. using state: \n{}", assignedReplicas, totalShards, clusterService.state().prettyPrint());
+
+
+        final TransportShardReplicationOperationAction<Request, Request, Response>.InternalRequest internalRequest = action.new InternalRequest(request);
+        internalRequest.concreteIndex(shardId.index().name());
+        TransportShardReplicationOperationAction<Request, Request, Response>.ReplicationPhase replicationPhase =
+                action.new ReplicationPhase(shardIt, request,
+                        new Response(), new ClusterStateObserver(clusterService, logger),
+                        primaryShard, internalRequest, listener);
+
+        assertThat(replicationPhase.totalShards(), equalTo(totalShards));
+        assertThat(replicationPhase.pending(), equalTo(assignedReplicas));
+        replicationPhase.run();
+        final CapturingTransport.CapturedRequest[] capturedRequests = transport.capturedRequests();
+        transport.clear();
+        assertThat(capturedRequests.length, equalTo(assignedReplicas));
+        if (assignedReplicas > 0) {
+            assertThat("listener is done, but there are outstanding replicas", listener.isDone(), equalTo(false));
+        }
+        int pending = replicationPhase.pending();
+        int criticalFailures = 0; // failures that should fail the shard
+        int successful = 1;
+        for (CapturingTransport.CapturedRequest capturedRequest : capturedRequests) {
+            if (randomBoolean()) {
+                Throwable t;
+                if (randomBoolean()) {
+                    t = new CorruptIndexException("simulated", (String) null);
+                    criticalFailures++;
+                } else {
+                    t = new IndexShardNotStartedException(shardId, IndexShardState.RECOVERING);
+                }
+                logger.debug("--> simulating failure on {} with [{}]", capturedRequest.node, t.getClass().getSimpleName());
+                transport.handleResponse(capturedRequest.requestId, t);
+            } else {
+                successful++;
+                transport.handleResponse(capturedRequest.requestId, TransportResponse.Empty.INSTANCE);
+            }
+            pending--;
+            assertThat(replicationPhase.pending(), equalTo(pending));
+            assertThat(replicationPhase.successful(), equalTo(successful));
+        }
+        assertThat(listener.isDone(), equalTo(true));
+        Response response = listener.get();
+        final ActionWriteResponse.ShardInfo shardInfo = response.getShardInfo();
+        assertThat(shardInfo.getFailed(), equalTo(criticalFailures));
+        assertThat(shardInfo.getFailures(), arrayWithSize(criticalFailures));
+        assertThat(shardInfo.getSuccessful(), equalTo(successful));
+        assertThat(shardInfo.getTotal(), equalTo(totalShards));
+
+        assertThat("failed to see enough shard failures", transport.capturedRequests().length, equalTo(criticalFailures));
+        for (CapturingTransport.CapturedRequest capturedRequest : transport.capturedRequests()) {
+            assertThat(capturedRequest.action, equalTo(ShardStateAction.SHARD_FAILED_ACTION_NAME));
+        }
+    }
+
+
+    static class Request extends ShardReplicationOperationRequest<Request> {
+        int shardId;
+        public AtomicBoolean processedOnPrimary = new AtomicBoolean();
+        public AtomicInteger processedOnReplicas = new AtomicInteger();
+
+        Request() {
+            this.operationThreaded(false);
+        }
+
+        Request(ShardId shardId) {
+            this();
+            this.shardId = shardId.id();
+            this.index(shardId.index().name());
+            // keep things simple
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+            super.writeTo(out);
+            out.writeVInt(shardId);
+        }
+
+        @Override
+        public void readFrom(StreamInput in) throws IOException {
+            super.readFrom(in);
+            shardId = in.readVInt();
+        }
+    }
+
+    static class Response extends ActionWriteResponse {
+
+    }
+
+    static class Action extends TransportShardReplicationOperationAction<Request, Request, Response> {
+
+        Action(Settings settings, String actionName, TransportService transportService,
+               ClusterService clusterService,
+               ThreadPool threadPool) {
+            super(settings, actionName, transportService, clusterService, null, threadPool,
+                    new ShardStateAction(settings, clusterService, transportService, null, null),
+                    new ActionFilters(new HashSet<ActionFilter>()), Request.class, Request.class, ThreadPool.Names.SAME);
+        }
+
+        @Override
+        protected Response newResponseInstance() {
+            return new Response();
+        }
+
+        @Override
+        protected Tuple<Response, Request> shardOperationOnPrimary(ClusterState clusterState, PrimaryOperationRequest shardRequest) throws Throwable {
+            boolean executedBefore = shardRequest.request.processedOnPrimary.getAndSet(true);
+            assert executedBefore == false : "request has already been executed on the primary";
+            return new Tuple<>(new Response(), shardRequest.request);
+        }
+
+        @Override
+        protected void shardOperationOnReplica(ShardId shardId, Request request) {
+            request.processedOnReplicas.incrementAndGet();
+        }
+
+        @Override
+        protected ShardIterator shards(ClusterState clusterState, InternalRequest request) {
+            return clusterState.getRoutingTable().index(request.concreteIndex()).shard(request.request().shardId).shardsIt();
+        }
+
+        @Override
+        protected boolean checkWriteConsistency() {
+            return false;
+        }
+
+        @Override
+        protected boolean resolveIndex() {
+            return false;
+        }
+    }
+
+    static class ActionWithConsistency extends Action {
+
+        ActionWithConsistency(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
+            super(settings, actionName, transportService, clusterService, threadPool);
+        }
+
+        @Override
+        protected boolean checkWriteConsistency() {
+            return true;
+        }
+    }
+
+    static DiscoveryNode newNode(int nodeId) {
+        return new DiscoveryNode("node_" + nodeId, DummyTransportAddress.INSTANCE, Version.CURRENT);
+    }
+
+
+}
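The Request class above doubles as a probe: shardOperationOnPrimary flips an AtomicBoolean with getAndSet(true) and asserts it was false before, so a second execution on the primary trips the assertion even under concurrency, while replicas merely bump a counter. The same execute-once pattern in isolation (class and method names are illustrative):

    import java.util.concurrent.atomic.AtomicBoolean;

    public class ExecuteOnceProbe {
        private final AtomicBoolean executed = new AtomicBoolean();

        // getAndSet is atomic, so even racing callers see exactly one "first" execution
        public void markExecuted() {
            boolean executedBefore = executed.getAndSet(true);
            assert executedBefore == false : "operation was executed more than once";
        }

        public boolean wasExecuted() {
            return executed.get();
        }
    }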
diff --git a/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqTests.java b/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqTests.java index 10d870e0f9a..f85428cfbae 100644 --- a/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqTests.java +++ b/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsCheckDocFreqTests.java @@ -62,7 +62,7 @@ public class GetTermVectorsCheckDocFreqTests extends ElasticsearchIntegrationTes } @Test - public void testSimpleTermVectors() throws ElasticsearchException, IOException { + public void testSimpleTermVectors() throws IOException { XContentBuilder mapping = XContentFactory.jsonBuilder().startObject().startObject("type1") .startObject("properties") .startObject("field") diff --git a/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsTests.java b/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsTests.java index d99d03ec0b4..5828f644670 100644 --- a/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsTests.java +++ b/src/test/java/org/elasticsearch/action/termvectors/GetTermVectorsTests.java @@ -184,7 +184,7 @@ public class GetTermVectorsTests extends AbstractTermVectorsTests { } @Test - public void testSimpleTermVectors() throws ElasticsearchException, IOException { + public void testSimpleTermVectors() throws IOException { XContentBuilder mapping = jsonBuilder().startObject().startObject("type1") .startObject("properties") .startObject("field") @@ -222,7 +222,7 @@ public class GetTermVectorsTests extends AbstractTermVectorsTests { } @Test - public void testRandomSingleTermVectors() throws ElasticsearchException, IOException { + public void testRandomSingleTermVectors() throws IOException { FieldType ft = new FieldType(); int config = randomInt(6); boolean storePositions = false; @@ -410,7 +410,7 @@ public class GetTermVectorsTests extends AbstractTermVectorsTests { } @Test - public void testRandomPayloadWithDelimitedPayloadTokenFilter() throws ElasticsearchException, IOException { + public void testRandomPayloadWithDelimitedPayloadTokenFilter() throws IOException { //create the test document int encoding = randomIntBetween(0, 2); String encodingString = ""; @@ -578,7 +578,7 @@ public class GetTermVectorsTests extends AbstractTermVectorsTests { // like testSimpleTermVectors but we create fields with no term vectors @Test - public void testSimpleTermVectorsWithGenerate() throws ElasticsearchException, IOException { + public void testSimpleTermVectorsWithGenerate() throws IOException { String[] fieldNames = new String[10]; for (int i = 0; i < fieldNames.length; i++) { fieldNames[i] = "field" + String.valueOf(i); @@ -630,7 +630,7 @@ public class GetTermVectorsTests extends AbstractTermVectorsTests { } } - private void checkBrownFoxTermVector(Fields fields, String fieldName, boolean withPayloads) throws ElasticsearchException, IOException { + private void checkBrownFoxTermVector(Fields fields, String fieldName, boolean withPayloads) throws IOException { String[] values = {"brown", "dog", "fox", "jumps", "lazy", "over", "quick", "the"}; int[] freq = {1, 1, 1, 1, 1, 1, 1, 2}; int[][] pos = {{2}, {8}, {3}, {4}, {7}, {5}, {1}, {0, 6}}; @@ -671,7 +671,7 @@ public class GetTermVectorsTests extends AbstractTermVectorsTests { } @Test - public void testDuelWithAndWithoutTermVectors() throws ElasticsearchException, IOException, ExecutionException, InterruptedException { + public void testDuelWithAndWithoutTermVectors() throws IOException, ExecutionException, InterruptedException { // setup indices String[] indexNames = new String[]
{"with_tv", "without_tv"}; assertAcked(prepareCreate(indexNames[0]) @@ -760,7 +760,7 @@ public class GetTermVectorsTests extends AbstractTermVectorsTests { } @Test - public void testSimpleWildCards() throws ElasticsearchException, IOException { + public void testSimpleWildCards() throws IOException { int numFields = 25; XContentBuilder mapping = jsonBuilder().startObject().startObject("type1").startObject("properties"); @@ -788,7 +788,7 @@ public class GetTermVectorsTests extends AbstractTermVectorsTests { } @Test - public void testArtificialVsExisting() throws ElasticsearchException, ExecutionException, InterruptedException, IOException { + public void testArtificialVsExisting() throws ExecutionException, InterruptedException, IOException { // setup indices ImmutableSettings.Builder settings = settingsBuilder() .put(indexSettings()) @@ -924,7 +924,7 @@ public class GetTermVectorsTests extends AbstractTermVectorsTests { } @Test - public void testPerFieldAnalyzer() throws ElasticsearchException, IOException { + public void testPerFieldAnalyzer() throws IOException { int numFields = 25; // setup mapping and document source @@ -1021,7 +1021,7 @@ public class GetTermVectorsTests extends AbstractTermVectorsTests { } @Test - public void testDfs() throws ElasticsearchException, ExecutionException, InterruptedException, IOException { + public void testDfs() throws ExecutionException, InterruptedException, IOException { logger.info("Setting up the index ..."); ImmutableSettings.Builder settings = settingsBuilder() .put(indexSettings()) diff --git a/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java b/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java index 3a23269d055..1e533ef5333 100644 --- a/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java +++ b/src/test/java/org/elasticsearch/aliases/IndexAliasesTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.aliases; import org.apache.lucene.util.LuceneTestCase.Slow; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder; @@ -49,6 +48,7 @@ import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; +import java.util.Arrays; import java.util.Set; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -57,14 +57,13 @@ import java.util.concurrent.TimeUnit; import static com.google.common.collect.Sets.newHashSet; import static org.elasticsearch.client.Requests.createIndexRequest; import static org.elasticsearch.client.Requests.indexRequest; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.index.query.FilterBuilders.*; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.index.query.QueryBuilders.rangeQuery; import static org.elasticsearch.test.hamcrest.CollectionAssertions.hasKey; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; import static 
org.hamcrest.Matchers.*; /** @@ -112,7 +111,7 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest { try { indicesAliasesRequestBuilder.get(); fail("put alias should have failed due to invalid filter"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("failed to parse filter for alias [alias1]")); } @@ -121,7 +120,7 @@ try { indicesAliasesRequestBuilder.get(); fail("put alias should have failed due to invalid filter"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("failed to parse filter for alias [alias1]")); } } @@ -196,7 +195,7 @@ .get(); assertSearchResponse(searchResponse); Global global = searchResponse.getAggregations().get("global"); Terms terms = global.getAggregations().get("test"); assertThat(terms.getBuckets().size(), equalTo(4)); logger.info("--> checking single filtering alias search with global facets and sort"); @@ -405,26 +404,6 @@ logger.info("--> checking counts before delete"); assertThat(client().prepareCount("bars").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(1L)); - - logger.info("--> delete by query from a single alias"); - client().prepareDeleteByQuery("bars").setQuery(QueryBuilders.termQuery("name", "test")).get(); - - logger.info("--> verify that only one record was deleted"); - assertThat(client().prepareCount("test1").setQuery(QueryBuilders.matchAllQuery()).get().getCount(), equalTo(3L)); - - logger.info("--> delete by query from an aliases pointing to two indices"); - client().prepareDeleteByQuery("foos").setQuery(QueryBuilders.matchAllQuery()).get(); - - logger.info("--> verify that proper records were deleted"); - SearchResponse searchResponse = client().prepareSearch("aliasToTests").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "3", "4", "6", "7", "8"); - - logger.info("--> delete by query from an aliases and an index"); - client().prepareDeleteByQuery("tests", "test2").setQuery(QueryBuilders.matchAllQuery()).get(); - - logger.info("--> verify that proper records were deleted"); - searchResponse = client().prepareSearch("aliasToTests").setQuery(QueryBuilders.matchAllQuery()).get(); - assertHits(searchResponse.getHits(), "4"); } @@ -763,7 +742,7 @@ try { assertAcked(admin().indices().prepareAliases().addAliasAction(AliasAction.newAddAliasAction(null, "alias1"))); fail("create alias should have failed due to null index"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat("Exception text does not contain \"Alias action [add]: [index] may not be empty string\"", e.getMessage(), containsString("Alias action [add]: [index] may not be empty string")); } @@ -780,7 +759,7 @@ try { assertAcked(admin().indices().prepareAliases().addAlias((String) null, "empty-alias")); fail("create alias should have failed due to null index"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) {
assertThat("Exception text does not contain \"Alias action [add]: [index] may not be empty string\"", e.getMessage(), containsString("Alias action [add]: [index] may not be empty string")); } @@ -908,10 +887,10 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest { assertAcked(prepareCreate("test") .addMapping("type", "field", "type=string") .setAliases("{\n" + - " \"alias1\" : {},\n" + - " \"alias2\" : {\"filter\" : {\"term\": {\"field\":\"value\"}}},\n" + - " \"alias3\" : { \"index_routing\" : \"index\", \"search_routing\" : \"search\"}\n" + - "}")); + " \"alias1\" : {},\n" + + " \"alias2\" : {\"filter\" : {\"term\": {\"field\":\"value\"}}},\n" + + " \"alias3\" : { \"index_routing\" : \"index\", \"search_routing\" : \"search\"}\n" + + "}")); checkAliases(); } @@ -924,7 +903,7 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest { try { createIndexRequestBuilder.get(); fail("create index should have failed due to invalid alias filter"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("failed to parse filter for alias [alias2]")); } @@ -934,7 +913,7 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest { try { createIndexRequestBuilder.get(); fail("create index should have failed due to invalid alias filter"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("failed to parse filter for alias [alias2]")); } } @@ -948,8 +927,8 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest { .addAlias("test", "a", FilterBuilders.termFilter("field1", "term")) .get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { - assertThat(e.getRootCause(), instanceOf(QueryParsingException.class)); + } catch (IllegalArgumentException e) { + assertThat(e.getCause(), instanceOf(QueryParsingException.class)); } try { @@ -957,8 +936,8 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest { .addAlias("test", "a", FilterBuilders.rangeFilter("field2").from(0).to(1)) .get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { - assertThat(e.getRootCause(), instanceOf(QueryParsingException.class)); + } catch (IllegalArgumentException e) { + assertThat(e.getCause(), instanceOf(QueryParsingException.class)); } client().admin().indices().prepareAliases() @@ -969,7 +948,7 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest { @Test public void testAliasFilterWithNowInRangeFilterAndQuery() throws Exception { assertAcked(prepareCreate("my-index").addMapping("my-type", "_timestamp", "enabled=true")); - assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter1", rangeFilter("_timestamp").cache(randomBoolean()).from("now-1d").to("now"))); + assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter1", rangeFilter("_timestamp").from("now-1d").to("now"))); assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter2", queryFilter(rangeQuery("_timestamp").from("now-1d").to("now")))); final int numDocs = scaledRandomIntBetween(5, 52); @@ -989,13 +968,56 @@ public class IndexAliasesTests extends ElasticsearchIntegrationTest { @Test public void testAliasesFilterWithHasChildQuery() throws Exception { assertAcked(prepareCreate("my-index") - .addMapping("parent") - .addMapping("child", "_parent", "type=parent") + .addMapping("parent") + .addMapping("child", "_parent", "type=parent") ); 
assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter1", hasChildFilter("child", matchAllQuery()))); assertAcked(admin().indices().prepareAliases().addAlias("my-index", "filter2", hasParentFilter("child", matchAllQuery()))); } + @Test + public void testAliasesWithBlocks() { + createIndex("test"); + ensureGreen(); + + for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + enableIndexBlock("test", block); + + assertAcked(admin().indices().prepareAliases().addAlias("test", "alias1").addAlias("test", "alias2")); + assertAcked(admin().indices().prepareAliases().removeAlias("test", "alias1")); + assertThat(admin().indices().prepareGetAliases("alias2").execute().actionGet().getAliases().get("test").size(), equalTo(1)); + assertThat(admin().indices().prepareAliasesExist("alias2").get().exists(), equalTo(true)); + } finally { + disableIndexBlock("test", block); + } + } + + try { + enableIndexBlock("test", SETTING_READ_ONLY); + + assertBlocked(admin().indices().prepareAliases().addAlias("test", "alias3"), INDEX_READ_ONLY_BLOCK); + assertBlocked(admin().indices().prepareAliases().removeAlias("test", "alias2"), INDEX_READ_ONLY_BLOCK); + assertThat(admin().indices().prepareGetAliases("alias2").execute().actionGet().getAliases().get("test").size(), equalTo(1)); + assertThat(admin().indices().prepareAliasesExist("alias2").get().exists(), equalTo(true)); + + } finally { + disableIndexBlock("test", SETTING_READ_ONLY); + } + + try { + enableIndexBlock("test", SETTING_BLOCKS_METADATA); + + assertBlocked(admin().indices().prepareAliases().addAlias("test", "alias3"), INDEX_METADATA_BLOCK); + assertBlocked(admin().indices().prepareAliases().removeAlias("test", "alias2"), INDEX_METADATA_BLOCK); + assertBlocked(admin().indices().prepareGetAliases("alias2"), INDEX_METADATA_BLOCK); + assertBlocked(admin().indices().prepareAliasesExist("alias2"), INDEX_METADATA_BLOCK); + + } finally { + disableIndexBlock("test", SETTING_BLOCKS_METADATA); + } + } + private void checkAliases() { GetAliasesResponse getAliasesResponse = admin().indices().prepareGetAliases("alias1").get(); assertThat(getAliasesResponse.getAliases().get("test").size(), equalTo(1)); diff --git a/src/test/java/org/elasticsearch/benchmark/search/aggregations/TimeDataHistogramAggregationBenchmark.java b/src/test/java/org/elasticsearch/benchmark/search/aggregations/TimeDataHistogramAggregationBenchmark.java index 086966cef6b..8445f8a1f45 100644 --- a/src/test/java/org/elasticsearch/benchmark/search/aggregations/TimeDataHistogramAggregationBenchmark.java +++ b/src/test/java/org/elasticsearch/benchmark/search/aggregations/TimeDataHistogramAggregationBenchmark.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.search.SearchType; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; import org.elasticsearch.common.StopWatch; @@ -211,7 +210,7 @@ public class TimeDataHistogramAggregationBenchmark { private static SearchResponse doTermsAggsSearch(String name, String field, float matchPercentage) { SearchResponse response = client.prepareSearch() .setSize(0) - .setQuery(QueryBuilders.constantScoreQuery(FilterBuilders.scriptFilter("random()() { - @Override - public BenchmarkMessageRequest newInstance() { - return new BenchmarkMessageRequest(); - } - - 
@Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + transportServiceServer.registerRequestHandler("benchmark", BenchmarkMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(BenchmarkMessageRequest request, TransportChannel channel) throws Exception { channel.sendResponse(new BenchmarkMessageResponse(request)); diff --git a/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java b/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java index be3717b65d3..4fb24db7f91 100644 --- a/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java +++ b/src/test/java/org/elasticsearch/benchmark/transport/TransportBenchmark.java @@ -80,17 +80,7 @@ public class TransportBenchmark { final DiscoveryNode node = new DiscoveryNode("server", serverTransportService.boundAddress().publishAddress(), Version.CURRENT); - serverTransportService.registerHandler("benchmark", new BaseTransportRequestHandler() { - @Override - public BenchmarkMessageRequest newInstance() { - return new BenchmarkMessageRequest(); - } - - @Override - public String executor() { - return executor; - } - + serverTransportService.registerRequestHandler("benchmark", BenchmarkMessageRequest.class, executor, new TransportRequestHandler() { @Override public void messageReceived(BenchmarkMessageRequest request, TransportChannel channel) throws Exception { channel.sendResponse(new BenchmarkMessageResponse(request)); diff --git a/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java b/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java index a68fccf4f58..3fbdfa142ac 100644 --- a/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java +++ b/src/test/java/org/elasticsearch/blocks/SimpleBlocksTests.java @@ -49,14 +49,14 @@ public class SimpleBlocksTests extends ElasticsearchIntegrationTest { canIndexExists("test1"); // cluster.read_only = true: block write and metadata - setClusterReadOnly("true"); + setClusterReadOnly(true); canNotCreateIndex("test2"); // even if index has index.read_only = false canNotIndexDocument("test1"); - canNotIndexExists("test1"); + canIndexExists("test1"); // cluster.read_only = false: removes the block - setClusterReadOnly("false"); + setClusterReadOnly(false); canCreateIndex("test2"); canIndexDocument("test2"); canIndexDocument("test1"); @@ -71,7 +71,7 @@ public class SimpleBlocksTests extends ElasticsearchIntegrationTest { // adds index write and metadata block setIndexReadOnly( "ro", "true"); canNotIndexDocument("ro"); - canNotIndexExists("ro"); + canIndexExists("ro"); // other indices not blocked canCreateIndex("rw"); @@ -156,11 +156,6 @@ public class SimpleBlocksTests extends ElasticsearchIntegrationTest { } } - private void setClusterReadOnly(String value) { - Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY, value).build(); - client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).execute().actionGet(); - } - private void setIndexReadOnly(String index, Object value) { HashMap newSettings = new HashMap<>(); newSettings.put(IndexMetaData.SETTING_READ_ONLY, value); diff --git a/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java b/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java new file mode 100644 index 00000000000..4c2ddcd47eb --- /dev/null +++ b/src/test/java/org/elasticsearch/bootstrap/SecurityTests.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elasticsearch under one or more 
contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.bootstrap; + +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.test.ElasticsearchTestCase; + +import java.io.ByteArrayInputStream; +import java.io.FilePermission; +import java.nio.file.Path; +import java.security.Policy; +import java.security.ProtectionDomain; +import java.security.URIParameter; + +public class SecurityTests extends ElasticsearchTestCase { + + /** test generated permissions */ + public void testGeneratedPermissions() throws Exception { + Path path = createTempDir(); + // make a fake ES home and ensure we only grant permissions to that. + Path esHome = path.resolve("esHome"); + ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder(); + settingsBuilder.put("path.home", esHome.toString()); + Settings settings = settingsBuilder.build(); + + Environment environment = new Environment(settings); + Path policyFile = Security.processTemplate(new ByteArrayInputStream(new byte[0]), environment); + + ProtectionDomain domain = getClass().getProtectionDomain(); + Policy policy = Policy.getInstance("JavaPolicy", new URIParameter(policyFile.toUri())); + // the fake es home + assertTrue(policy.implies(domain, new FilePermission(esHome.toString(), "read"))); + // its parent + assertFalse(policy.implies(domain, new FilePermission(path.toString(), "read"))); + // some other sibling + assertFalse(policy.implies(domain, new FilePermission(path.resolve("other").toString(), "read"))); + } + + /** test generated permissions for all configured paths */ + public void testEnvironmentPaths() throws Exception { + Path path = createTempDir(); + + ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder(); + settingsBuilder.put("path.home", path.resolve("home").toString()); + settingsBuilder.put("path.conf", path.resolve("conf").toString()); + settingsBuilder.put("path.plugins", path.resolve("plugins").toString()); + settingsBuilder.putArray("path.data", path.resolve("data1").toString(), path.resolve("data2").toString()); + settingsBuilder.put("path.logs", path.resolve("logs").toString()); + Settings settings = settingsBuilder.build(); + + Environment environment = new Environment(settings); + Path policyFile = Security.processTemplate(new ByteArrayInputStream(new byte[0]), environment); + + ProtectionDomain domain = getClass().getProtectionDomain(); + Policy policy = Policy.getInstance("JavaPolicy", new URIParameter(policyFile.toUri())); + + // check that all directories got permissions: + // homefile: this is needed unless we break out rules for "lib" dir. 
+ // TODO: make read-only + assertTrue(policy.implies(domain, new FilePermission(environment.homeFile().toString(), "read,readlink,write,delete"))); + // config file + // TODO: make read-only + assertTrue(policy.implies(domain, new FilePermission(environment.configFile().toString(), "read,readlink,write,delete"))); + // plugins: r/w, TODO: can this be minimized? + assertTrue(policy.implies(domain, new FilePermission(environment.pluginsFile().toString(), "read,readlink,write,delete"))); + // data paths: r/w + for (Path dataPath : environment.dataFiles()) { + assertTrue(policy.implies(domain, new FilePermission(dataPath.toString(), "read,readlink,write,delete"))); + } + for (Path dataPath : environment.dataWithClusterFiles()) { + assertTrue(policy.implies(domain, new FilePermission(dataPath.toString(), "read,readlink,write,delete"))); + } + // logs: r/w + assertTrue(policy.implies(domain, new FilePermission(environment.logsFile().toString(), "read,readlink,write,delete"))); + } +} diff --git a/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java b/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java index 3a4aaa89feb..05723ac1b69 100644 --- a/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java +++ b/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java @@ -21,7 +21,6 @@ package org.elasticsearch.bwcompat; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.lucene.index.Fields; import org.apache.lucene.util.English; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus; @@ -31,8 +30,6 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.count.CountResponse; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; -import org.elasticsearch.action.deletebyquery.IndexDeleteByQueryResponse; import org.elasticsearch.action.explain.ExplainResponse; import org.elasticsearch.action.get.*; import org.elasticsearch.action.index.IndexRequestBuilder; @@ -395,7 +392,7 @@ public class BasicBackwardsCompatibilityTest extends ElasticsearchBackwardsCompa setSettings(ImmutableSettings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern()).put(indexSettings())) .addMapping("type", mapping)); } catch (MapperParsingException ex) { - assertThat(ex.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(ex.getCause(), instanceOf(IllegalArgumentException.class)); assertThat(ExceptionsHelper.detailedMessage(ex).contains("type=_field_names is not supported on indices created before version 1.3.0"), equalTo(true)); } @@ -478,36 +475,6 @@ public class BasicBackwardsCompatibilityTest extends ElasticsearchBackwardsCompa return client().admin().cluster().prepareState().get().getState().nodes().masterNode().getVersion(); } - @Test - public void testDeleteByQuery() throws ExecutionException, InterruptedException { - createIndex("test"); - ensureYellow("test"); - - int numDocs = iterations(10, 50); - IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs + 1]; - for (int i = 0; i < numDocs; i++) { - indexRequestBuilders[i] = 
client().prepareIndex("test", "test", Integer.toString(i)).setSource("field", "value"); - } - indexRequestBuilders[numDocs] = client().prepareIndex("test", "test", Integer.toString(numDocs)).setSource("field", "other_value"); - indexRandom(true, indexRequestBuilders); - - SearchResponse searchResponse = client().prepareSearch("test").get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().totalHits(), equalTo((long) numDocs + 1)); - - DeleteByQueryResponse deleteByQueryResponse = client().prepareDeleteByQuery("test").setQuery(QueryBuilders.termQuery("field", "value")).get(); - assertThat(deleteByQueryResponse.getIndices().size(), equalTo(1)); - for (IndexDeleteByQueryResponse indexDeleteByQueryResponse : deleteByQueryResponse) { - assertThat(indexDeleteByQueryResponse.getIndex(), equalTo("test")); - assertThat(indexDeleteByQueryResponse.getShardInfo().getFailures().length, equalTo(0)); - } - - refresh(); - searchResponse = client().prepareSearch("test").get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); - } - @Test public void testDeleteRoutingRequired() throws ExecutionException, InterruptedException, IOException { createIndexWithAlias(); diff --git a/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatTests.java b/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatTests.java index 3eb69750894..c55681d3fd2 100644 --- a/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatTests.java +++ b/src/test/java/org/elasticsearch/bwcompat/ClusterStateBackwardsCompatTests.java @@ -19,16 +19,25 @@ package org.elasticsearch.bwcompat; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlock; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest; import org.junit.Test; -import static org.hamcrest.Matchers.*; + +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.hamcrest.Matchers.equalTo; public class ClusterStateBackwardsCompatTests extends ElasticsearchBackwardsCompatIntegrationTest { @@ -36,13 +45,9 @@ public class ClusterStateBackwardsCompatTests extends ElasticsearchBackwardsComp public void testClusterState() throws Exception { createIndex("test"); - NodesInfoResponse nodesInfo = client().admin().cluster().prepareNodesInfo().execute().actionGet(); - Settings settings = ImmutableSettings.settingsBuilder().put("client.transport.ignore_cluster_name", true) - .put("node.name", "transport_client_" + getTestName()).build(); - // connect to each node with a custom TransportClient, issue a ClusterStateRequest to test serialization - for (NodeInfo n : nodesInfo.getNodes()) { - try (TransportClient tc = new TransportClient(settings)) { + for (NodeInfo n : clusterNodes()) { + try (TransportClient tc = 
newTransportClient()) { tc.addTransportAddress(n.getNode().address()); ClusterStateResponse response = tc.admin().cluster().prepareState().execute().actionGet(); @@ -52,4 +57,53 @@ public class ClusterStateBackwardsCompatTests extends ElasticsearchBackwardsComp } } } + + @Test + public void testClusterStateWithBlocks() { + createIndex("test-blocks"); + + Map blocks = new HashMap<>(); + blocks.put(SETTING_BLOCKS_READ, IndexMetaData.INDEX_READ_BLOCK); + blocks.put(SETTING_BLOCKS_WRITE, IndexMetaData.INDEX_WRITE_BLOCK); + blocks.put(SETTING_BLOCKS_METADATA, IndexMetaData.INDEX_METADATA_BLOCK); + + for (Map.Entry block : blocks.entrySet()) { + try { + enableIndexBlock("test-blocks", block.getKey()); + + for (NodeInfo n : clusterNodes()) { + try (TransportClient tc = newTransportClient()) { + tc.addTransportAddress(n.getNode().address()); + + ClusterStateResponse response = tc.admin().cluster().prepareState().setIndices("test-blocks") + .setBlocks(true).setNodes(false).execute().actionGet(); + + ClusterBlocks clusterBlocks = response.getState().blocks(); + assertNotNull(clusterBlocks); + assertTrue(clusterBlocks.hasIndexBlock("test-blocks", block.getValue())); + + for (ClusterBlockLevel level : block.getValue().levels()) { + assertTrue(clusterBlocks.indexBlocked(level, "test-blocks")); + } + + IndexMetaData indexMetaData = response.getState().getMetaData().getIndices().get("test-blocks"); + assertNotNull(indexMetaData); + assertTrue(indexMetaData.settings().getAsBoolean(block.getKey(), null)); + } + } + } finally { + disableIndexBlock("test-blocks", block.getKey()); + } + } + } + + private NodesInfoResponse clusterNodes() { + return client().admin().cluster().prepareNodesInfo().execute().actionGet(); + } + + private TransportClient newTransportClient() { + Settings settings = ImmutableSettings.settingsBuilder().put("client.transport.ignore_cluster_name", true) + .put("node.name", "transport_client_" + getTestName()).build(); + return new TransportClient(settings); + } } diff --git a/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java b/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java index 4178534405f..2ff82d9f464 100644 --- a/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java +++ b/src/test/java/org/elasticsearch/bwcompat/OldIndexBackwardsCompatibilityTests.java @@ -72,6 +72,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +// needs at least 2 nodes since it bumps replicas to 1 @ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0) @LuceneTestCase.SuppressFileSystems("ExtrasFS") @LuceneTestCase.Slow @@ -204,6 +205,7 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio } return FileVisitResult.CONTINUE; } + @Override public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException { if (file.getFileName().toString().equals(IndexWriter.WRITE_LOCK_NAME)) { @@ -225,7 +227,6 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio void unloadIndex(String indexName) throws Exception { ElasticsearchAssertions.assertAcked(client().admin().indices().prepareDelete(indexName).get()); - ElasticsearchAssertions.assertAllFilesClosed(); } public void testAllVersionsTested() throws Exception { @@ -285,7 +286,6 @@ public class 
OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio version.luceneVersion.minor == Version.CURRENT.luceneVersion.minor; } - void assertIndexSanity(String indexName) { GetIndexResponse getIndexResponse = client().admin().indices().prepareGetIndex().addIndices(indexName).get(); assertEquals(1, getIndexResponse.indices().length); @@ -311,7 +311,14 @@ public class OldIndexBackwardsCompatibilityTests extends ElasticsearchIntegratio searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.filteredQuery(QueryBuilders.matchAllQuery(), FilterBuilders.existsFilter("string"))); searchRsp = searchReq.get(); ElasticsearchAssertions.assertNoFailures(searchRsp); - assertThat(numDocs, equalTo(searchRsp.getHits().getTotalHits())); + assertEquals(numDocs, searchRsp.getHits().getTotalHits()); + + logger.info("--> testing missing filter"); + // the field for the missing filter here needs to be different than the exists filter above, to avoid being found in the cache + searchReq = client().prepareSearch(indexName).setQuery(QueryBuilders.filteredQuery(QueryBuilders.matchAllQuery(), FilterBuilders.missingFilter("long_sort"))); + searchRsp = searchReq.get(); + ElasticsearchAssertions.assertNoFailures(searchRsp); + assertEquals(0, searchRsp.getHits().getTotalHits()); } void assertBasicAggregationWorks(String indexName) { diff --git a/src/test/java/org/elasticsearch/bwcompat/ParentChildDeleteByQueryBackwardsCompatibilityTest.java b/src/test/java/org/elasticsearch/bwcompat/ParentChildDeleteByQueryBackwardsCompatibilityTest.java deleted file mode 100644 index a45c080eb68..00000000000 --- a/src/test/java/org/elasticsearch/bwcompat/ParentChildDeleteByQueryBackwardsCompatibilityTest.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.bwcompat; - -import org.elasticsearch.Version; -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.settings.ImmutableSettings; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; - -import static org.elasticsearch.index.query.QueryBuilders.hasChildQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; -import static org.hamcrest.core.Is.is; - -/** - */ -public class ParentChildDeleteByQueryBackwardsCompatibilityTest extends ElasticsearchBackwardsCompatIntegrationTest { - - @BeforeClass - public static void checkVersion() { - assumeTrue("parent child queries in delete by query is forbidden from 1.1.2 and up", globalCompatibilityVersion().onOrBefore(Version.V_1_1_1)); - } - - @Override - public void assertAllShardsOnNodes(String index, String pattern) { - super.assertAllShardsOnNodes(index, pattern); - } - - @Override - protected Settings externalNodeSettings(int nodeOrdinal) { - return ImmutableSettings.builder() - .put(super.externalNodeSettings(nodeOrdinal)) - .put("index.translog.disable_flush", true) - .build(); - } - - @Test - public void testHasChild() throws Exception { - assertAcked(prepareCreate("idx") - .setSettings(ImmutableSettings.builder() - .put(indexSettings()) - .put("index.refresh_interval", "-1") - .put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern()) - ) - .addMapping("parent") - .addMapping("child", "_parent", "type=parent")); - - List requests = new ArrayList<>(); - requests.add(client().prepareIndex("idx", "parent", "1").setSource("{}")); - requests.add(client().prepareIndex("idx", "child", "1").setParent("1").setSource("{}")); - indexRandom(true, requests); - - SearchResponse response = client().prepareSearch("idx") - .setQuery(hasChildQuery("child", matchAllQuery())) - .get(); - assertNoFailures(response); - assertHitCount(response, 1); - - client().prepareDeleteByQuery("idx") - .setQuery(hasChildQuery("child", matchAllQuery())) - .get(); - refresh(); - - response = client().prepareSearch("idx") - .setQuery(hasChildQuery("child", matchAllQuery())) - .get(); - assertNoFailures(response); - assertHitCount(response, 0); - - client().prepareIndex("idx", "type", "1").setSource("{}").get(); - assertThat(client().prepareGet("idx", "type", "1").get().isExists(), is(true)); - - backwardsCluster().upgradeAllNodes(); - backwardsCluster().allowOnAllNodes("idx"); - ensureGreen("idx"); - - response = client().prepareSearch("idx") - .setQuery(hasChildQuery("child", matchAllQuery())) - .get(); - assertNoFailures(response); - assertHitCount(response, 1); // The delete by query has failed on recovery so that parent doc is still there - - // But the rest of the recovery did execute, we just skipped over the delete by query with the p/c query. 
- assertThat(client().prepareGet("idx", "type", "1").get().isExists(), is(true)); - response = client().prepareSearch("idx").setTypes("type").get(); - assertNoFailures(response); - assertHitCount(response, 1); - } - -} diff --git a/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java b/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java index 022eb815f2c..05561a9dec0 100644 --- a/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java +++ b/src/test/java/org/elasticsearch/client/AbstractClientHeadersTests.java @@ -23,8 +23,6 @@ import com.google.common.base.Throwables; import com.google.common.collect.ImmutableMap; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.GenericAction; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownAction; -import org.elasticsearch.action.admin.cluster.node.shutdown.NodesShutdownResponse; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteAction; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; @@ -80,7 +78,7 @@ public abstract class AbstractClientHeadersTests extends ElasticsearchTestCase { IndexAction.INSTANCE, // cluster admin actions - ClusterStatsAction.INSTANCE, CreateSnapshotAction.INSTANCE, NodesShutdownAction.INSTANCE, ClusterRerouteAction.INSTANCE, + ClusterStatsAction.INSTANCE, CreateSnapshotAction.INSTANCE, ClusterRerouteAction.INSTANCE, // indices admin actions CreateIndexAction.INSTANCE, IndicesStatsAction.INSTANCE, ClearIndicesCacheAction.INSTANCE, FlushAction.INSTANCE @@ -119,7 +117,6 @@ public abstract class AbstractClientHeadersTests extends ElasticsearchTestCase { // choosing arbitrary cluster admin actions to test client.admin().cluster().prepareClusterStats().execute().addListener(new AssertingActionListener(ClusterStatsAction.NAME)); client.admin().cluster().prepareCreateSnapshot("repo", "bck").execute().addListener(new AssertingActionListener(CreateSnapshotAction.NAME)); - client.admin().cluster().prepareNodesShutdown("n1", "n2").execute().addListener(new AssertingActionListener(NodesShutdownAction.NAME)); client.admin().cluster().prepareReroute().execute().addListener(new AssertingActionListener(ClusterRerouteAction.NAME)); // choosing arbitrary indices admin actions to test diff --git a/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java b/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java index fc722c46c8c..fcf64e05187 100644 --- a/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java +++ b/src/test/java/org/elasticsearch/client/transport/FailAndRetryMockTransport.java @@ -173,17 +173,17 @@ abstract class FailAndRetryMockTransport imp } @Override - public Transport start() throws ElasticsearchException { + public Transport start() { return null; } @Override - public Transport stop() throws ElasticsearchException { + public Transport stop() { return null; } @Override - public void close() throws ElasticsearchException { + public void close() { } diff --git a/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java b/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java index dcf35d2dff6..8e347935241 100644 --- a/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java +++ b/src/test/java/org/elasticsearch/client/transport/TransportClientTests.java @@ -25,6 +25,7 @@ 
import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.index.store.IndexStoreModule; import org.elasticsearch.node.Node; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; @@ -57,7 +58,6 @@ public class TransportClientTests extends ElasticsearchIntegrationTest { .put("path.home", createTempDir()) .put("node.name", "testNodeVersionIsUpdated") .put("http.enabled", false) - .put("index.store.type", "ram") .put("config.ignore_system_properties", true) // make sure we get what we set :) .build()).clusterName("foobar").build(); node.start(); diff --git a/src/test/java/org/elasticsearch/cluster/BlockClusterStatsTests.java b/src/test/java/org/elasticsearch/cluster/BlockClusterStatsTests.java deleted file mode 100644 index 31abbc2c020..00000000000 --- a/src/test/java/org/elasticsearch/cluster/BlockClusterStatsTests.java +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.cluster; - -import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; -import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; -import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.client.Requests; -import org.elasticsearch.cluster.block.ClusterBlock; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.common.settings.ImmutableSettings; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.junit.Test; - -import static org.elasticsearch.test.ElasticsearchIntegrationTest.*; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.*; - -/** - * Scoped as test, because the if the test with cluster read only block fails, all other tests fail as well, as this is not cleaned up properly - */ -@ClusterScope(scope= Scope.TEST) -public class BlockClusterStatsTests extends ElasticsearchIntegrationTest { - - @Test - public void testBlocks() throws Exception { - assertAcked(prepareCreate("foo").addAlias(new Alias("foo-alias"))); - try { - assertAcked(client().admin().indices().prepareUpdateSettings("foo").setSettings( - ImmutableSettings.settingsBuilder().put("index.blocks.read_only", true))); - ClusterUpdateSettingsResponse updateSettingsResponse = client().admin().cluster().prepareUpdateSettings().setTransientSettings( - ImmutableSettings.settingsBuilder().put("cluster.blocks.read_only", true).build()).get(); - assertThat(updateSettingsResponse.isAcknowledged(), is(true)); - - ClusterStateResponse clusterStateResponseUnfiltered = client().admin().cluster().prepareState().setLocal(true).clear().setBlocks(true).get(); - assertThat(clusterStateResponseUnfiltered.getState().blocks().global(), hasSize(1)); - assertThat(clusterStateResponseUnfiltered.getState().blocks().indices().size(), is(1)); - ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().clear().get(); - assertThat(clusterStateResponse.getState().blocks().global(), hasSize(0)); - assertThat(clusterStateResponse.getState().blocks().indices().size(), is(0)); - - try { - client().admin().indices().prepareClose("foo-alias").get(); - fail("close index should have failed"); - } catch(ClusterBlockException e) { - assertClusterAndIndexBlocks(e); - } - - try { - client().admin().indices().preparePutMapping("foo-alias").setType("type1").setSource("field1", "type=string").get(); - fail("put mapping should have failed"); - } catch(ClusterBlockException e) { - assertClusterAndIndexBlocks(e); - } - - try { - client().admin().indices().preparePutWarmer("foo-alias").setSearchRequest(Requests.searchRequest("foo-alias")).get(); - fail("put warmer should have failed"); - } catch(ClusterBlockException e) { - assertClusterAndIndexBlocks(e); - } - - try { - client().admin().indices().prepareDeleteWarmer().setIndices("foo-alias").setNames("warmer1").get(); - fail("delete warmer should have failed"); - } catch(ClusterBlockException e) { - assertClusterAndIndexBlocks(e); - } - - try { - client().admin().indices().prepareTypesExists("foo-alias").setTypes("test").get(); - fail("types exists should have failed"); - } catch(ClusterBlockException e) { - assertClusterAndIndexBlocks(e); - } - - try { - client().admin().indices().prepareExists("foo-alias").get(); - fail("indices exists should have failed"); - } catch(ClusterBlockException e) { - assertClusterAndIndexBlocks(e); 
- } - - } finally { - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings( - ImmutableSettings.settingsBuilder().put("cluster.blocks.read_only", false).build()).get()); - assertAcked(client().admin().indices().prepareUpdateSettings("foo").setSettings( - ImmutableSettings.settingsBuilder().put("index.blocks.read_only", false))); - } - } - - private void assertClusterAndIndexBlocks(ClusterBlockException e) { - assertThat(e.blocks().size(), equalTo(2)); - for (ClusterBlock clusterBlock : e.blocks()) { - assertThat(clusterBlock.status(), equalTo(RestStatus.FORBIDDEN)); - assertThat(clusterBlock.id(), either(equalTo(5)).or(equalTo(6))); - assertThat(clusterBlock.description(), either(containsString("cluster read-only (api)")).or(containsString("index read-only (api)"))); - } - } -} diff --git a/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java b/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java index cd393ff88cd..8b183212893 100644 --- a/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java +++ b/src/test/java/org/elasticsearch/cluster/ClusterServiceTests.java @@ -21,10 +21,12 @@ package org.elasticsearch.cluster; import com.google.common.base.Predicate; import com.google.common.util.concurrent.ListenableFuture; -import org.elasticsearch.ElasticsearchException; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.InternalClusterService; import org.elasticsearch.cluster.service.PendingClusterTask; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; @@ -38,6 +40,8 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.plugins.AbstractPlugin; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.ThreadPool; import org.junit.Test; @@ -48,6 +52,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.*; /** @@ -721,6 +726,215 @@ public class ClusterServiceTests extends ElasticsearchIntegrationTest { } } + @Test + @TestLogging("cluster:TRACE") // To ensure that we log cluster state events on TRACE level + public void testClusterStateUpdateLogging() throws Exception { + Settings settings = settingsBuilder() + .put("discovery.type", "local") + .build(); + internalCluster().startNode(settings); + ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class); + MockLogAppender mockAppender = new MockLogAppender(); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test1", "cluster.service", Level.DEBUG, "*processing [test1]: took * no change in cluster_state")); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.TRACE, "*failed to execute cluster state update in *")); + mockAppender.addExpectation(new 
MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.DEBUG, "*processing [test3]: took * done applying updated cluster_state (version: *, uuid: *)")); + + Logger rootLogger = Logger.getRootLogger(); + rootLogger.addAppender(mockAppender); + try { + final CountDownLatch latch = new CountDownLatch(4); + clusterService1.submitStateUpdateTask("test1", new ProcessedClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + clusterService1.submitStateUpdateTask("test2", new ProcessedClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task"); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + fail(); + } + + @Override + public void onFailure(String source, Throwable t) { + latch.countDown(); + } + }); + clusterService1.submitStateUpdateTask("test3", new ProcessedClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return ClusterState.builder(currentState).incrementVersion().build(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + // Additional update task to make sure all previous logging made it to the logger + // We don't check logging for this on since there is no guarantee that it will occur before our check + clusterService1.submitStateUpdateTask("test4", new ProcessedClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + assertThat(latch.await(1, TimeUnit.SECONDS), equalTo(true)); + } finally { + rootLogger.removeAppender(mockAppender); + } + mockAppender.assertAllExpectationsMatched(); + } + + @Test + @TestLogging("cluster:WARN") // To ensure that we log cluster state events on WARN level + public void testLongClusterStateUpdateLogging() throws Exception { + Settings settings = settingsBuilder() + .put("discovery.type", "local") + .put(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, "10s") + .build(); + internalCluster().startNode(settings); + ClusterService clusterService1 = internalCluster().getInstance(ClusterService.class); + MockLogAppender mockAppender = new MockLogAppender(); + mockAppender.addExpectation(new MockLogAppender.UnseenEventExpectation("test1 shouldn't see because setting is too low", "cluster.service", Level.WARN, "*cluster state update task [test1] took * above the warn threshold of *")); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test2", "cluster.service", Level.WARN, "*cluster state update task [test2] took * above the warn threshold of 10ms")); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test3", "cluster.service", Level.WARN, "*cluster state 
update task [test3] took * above the warn threshold of 10ms")); + mockAppender.addExpectation(new MockLogAppender.SeenEventExpectation("test4", "cluster.service", Level.WARN, "*cluster state update task [test4] took * above the warn threshold of 10ms")); + + Logger rootLogger = Logger.getRootLogger(); + rootLogger.addAppender(mockAppender); + try { + final CountDownLatch latch = new CountDownLatch(5); + final CountDownLatch processedFirstTask = new CountDownLatch(1); + clusterService1.submitStateUpdateTask("test1", new ProcessedClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + Thread.sleep(100); + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + processedFirstTask.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + + processedFirstTask.await(1, TimeUnit.SECONDS); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settingsBuilder() + .put(InternalClusterService.SETTING_CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD, "10ms"))); + + clusterService1.submitStateUpdateTask("test2", new ProcessedClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + Thread.sleep(100); + throw new IllegalArgumentException("Testing handling of exceptions in the cluster state task"); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + fail(); + } + + @Override + public void onFailure(String source, Throwable t) { + latch.countDown(); + } + }); + clusterService1.submitStateUpdateTask("test3", new ProcessedClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + Thread.sleep(100); + return ClusterState.builder(currentState).incrementVersion().build(); + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + clusterService1.submitStateUpdateTask("test4", new ProcessedClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) throws Exception { + Thread.sleep(100); + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + // Additional update task to make sure all previous logging made it to the logger + // We don't check logging for this on since there is no guarantee that it will occur before our check + clusterService1.submitStateUpdateTask("test5", new ProcessedClusterStateUpdateTask() { + @Override + public ClusterState execute(ClusterState currentState) { + return currentState; + } + + @Override + public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { + latch.countDown(); + } + + @Override + public void onFailure(String source, Throwable t) { + fail(); + } + }); + assertThat(latch.await(5, TimeUnit.SECONDS), equalTo(true)); + } finally { + rootLogger.removeAppender(mockAppender); + } + mockAppender.assertAllExpectationsMatched(); + } + private static class BlockingTask extends ClusterStateUpdateTask { private final 
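Both logging tests above lean on the same mechanism: the cluster service times every state update task and emits a WARN line once the dynamically updatable slow-task threshold is exceeded, which is why the test can start at an unreachable 10s and drop to 10ms mid-flight. A minimal sketch of that timing check, assuming a threshold field kept current by a settings listener (the class and field names below are illustrative, not the InternalClusterService internals):

import java.util.concurrent.TimeUnit;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.unit.TimeValue;

// Illustrative sketch only: time a task and warn when it runs past the threshold.
class SlowTaskLoggingSketch {
    private final ESLogger logger = Loggers.getLogger("cluster.service");
    // updated dynamically through a transient cluster setting, e.g. 10s -> 10ms above
    private volatile TimeValue slowTaskLoggingThreshold = TimeValue.timeValueSeconds(10);

    void onSettingsUpdate(TimeValue newThreshold) {
        slowTaskLoggingThreshold = newThreshold;
    }

    void execute(String source, Runnable task) {
        long startNanos = System.nanoTime();
        task.run();
        TimeValue took = TimeValue.timeValueMillis(
                TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos));
        if (took.millis() > slowTaskLoggingThreshold.millis()) {
            logger.warn("cluster state update task [{}] took {} above the warn threshold of {}",
                    source, took, slowTaskLoggingThreshold);
        }
    }
}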
CountDownLatch latch = new CountDownLatch(1); @@ -816,15 +1030,15 @@ public class ClusterServiceTests extends ElasticsearchIntegrationTest { } @Override - protected void doStart() throws ElasticsearchException { + protected void doStart() { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { } @Override - protected void doClose() throws ElasticsearchException { + protected void doClose() { } @Override diff --git a/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java b/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java new file mode 100644 index 00000000000..33008fd63d2 --- /dev/null +++ b/src/test/java/org/elasticsearch/cluster/ClusterStateDiffPublishingTests.java @@ -0,0 +1,625 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.discovery.Discovery; +import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.discovery.zen.DiscoveryNodesProvider; +import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction; +import org.elasticsearch.node.service.NodeService; +import org.elasticsearch.node.settings.NodeSettingsService; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportConnectionListener; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.transport.local.LocalTransport; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.google.common.collect.Maps.newHashMap; +import static org.hamcrest.Matchers.*; + +public class ClusterStateDiffPublishingTests 
extends ElasticsearchTestCase {
+
+    protected ThreadPool threadPool;
+    protected Map<String, MockNode> nodes = newHashMap();
+
+    public static class MockNode {
+        public final DiscoveryNode discoveryNode;
+        public final MockTransportService service;
+        public final PublishClusterStateAction action;
+        public final MockDiscoveryNodesProvider nodesProvider;
+
+        public MockNode(DiscoveryNode discoveryNode, MockTransportService service, PublishClusterStateAction action, MockDiscoveryNodesProvider nodesProvider) {
+            this.discoveryNode = discoveryNode;
+            this.service = service;
+            this.action = action;
+            this.nodesProvider = nodesProvider;
+        }
+
+        public void connectTo(DiscoveryNode node) {
+            service.connectToNode(node);
+            nodesProvider.addNode(node);
+        }
+    }
+
+    public MockNode createMockNode(final String name, Settings settings, Version version) throws Exception {
+        return createMockNode(name, settings, version, new PublishClusterStateAction.NewClusterStateListener() {
+            @Override
+            public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) {
+                logger.debug("Node [{}] onNewClusterState version [{}], uuid [{}]", name, clusterState.version(), clusterState.uuid());
+                newStateProcessed.onNewClusterStateProcessed();
+            }
+        });
+    }
+
+    public MockNode createMockNode(String name, Settings settings, Version version, PublishClusterStateAction.NewClusterStateListener listener) throws Exception {
+        MockTransportService service = buildTransportService(
+                ImmutableSettings.builder().put(settings).put("name", name, TransportService.SETTING_TRACE_LOG_INCLUDE, "", TransportService.SETTING_TRACE_LOG_EXCLUDE, "NOTHING").build(),
+                version
+        );
+        DiscoveryNode discoveryNode = new DiscoveryNode(name, name, service.boundAddress().publishAddress(), ImmutableMap.<String, String>of(), version);
+        MockDiscoveryNodesProvider nodesProvider = new MockDiscoveryNodesProvider(discoveryNode);
+        PublishClusterStateAction action = buildPublishClusterStateAction(settings, service, nodesProvider, listener);
+        MockNode node = new MockNode(discoveryNode, service, action, nodesProvider);
+        nodesProvider.addNode(discoveryNode);
+        // Every existing node connects to the new node and vice versa (two connections per
+        // existing node), plus the new node's connection to itself.
+        final CountDownLatch latch = new CountDownLatch(nodes.size() * 2 + 1);
+        TransportConnectionListener waitForConnection = new TransportConnectionListener() {
+            @Override
+            public void onNodeConnected(DiscoveryNode node) {
+                latch.countDown();
+            }
+
+            @Override
+            public void onNodeDisconnected(DiscoveryNode node) {
+                fail("disconnect should not be called " + node);
+            }
+        };
+        node.service.addConnectionListener(waitForConnection);
+        for (MockNode curNode : nodes.values()) {
+            curNode.service.addConnectionListener(waitForConnection);
+            curNode.connectTo(node.discoveryNode);
+            node.connectTo(curNode.discoveryNode);
+        }
+        node.connectTo(node.discoveryNode);
+        assertThat("failed to wait for all nodes to connect", latch.await(5, TimeUnit.SECONDS), equalTo(true));
+        for (MockNode curNode : nodes.values()) {
+            curNode.service.removeConnectionListener(waitForConnection);
+        }
+        node.service.removeConnectionListener(waitForConnection);
+        if (nodes.put(name, node) != null) {
+            fail("Node with the name " + name + " already exists");
+        }
+        return node;
+    }
+
+    public MockTransportService service(String name) {
+        MockNode node = nodes.get(name);
+        if (node != null) {
+            return node.service;
+        }
+        return null;
+    }
+
+    public PublishClusterStateAction action(String name) {
+        MockNode node = nodes.get(name);
+        if (node != null) {
+            return node.action;
+        }
+        return null;
+    }
+
+    @Override
+    @Before
+    public void setUp() throws
Exception { + super.setUp(); + threadPool = new ThreadPool(getClass().getName()); + } + + @Override + @After + public void tearDown() throws Exception { + super.tearDown(); + for (MockNode curNode : nodes.values()) { + curNode.action.close(); + curNode.service.close(); + } + terminate(threadPool); + } + + protected MockTransportService buildTransportService(Settings settings, Version version) { + MockTransportService transportService = new MockTransportService(settings, new LocalTransport(settings, threadPool, version), threadPool); + transportService.start(); + return transportService; + } + + protected PublishClusterStateAction buildPublishClusterStateAction(Settings settings, MockTransportService transportService, MockDiscoveryNodesProvider nodesProvider, + PublishClusterStateAction.NewClusterStateListener listener) { + DiscoverySettings discoverySettings = new DiscoverySettings(settings, new NodeSettingsService(settings)); + return new PublishClusterStateAction(settings, transportService, nodesProvider, listener, discoverySettings); + } + + + static class MockDiscoveryNodesProvider implements DiscoveryNodesProvider { + + private DiscoveryNodes discoveryNodes = DiscoveryNodes.EMPTY_NODES; + + public MockDiscoveryNodesProvider(DiscoveryNode localNode) { + discoveryNodes = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id()).build(); + } + + public void addNode(DiscoveryNode node) { + discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(node).build(); + } + + @Override + public DiscoveryNodes nodes() { + return discoveryNodes; + } + + @Override + public NodeService nodeService() { + assert false; + throw new UnsupportedOperationException("Shouldn't be here"); + } + } + + + @Test + @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") + public void testSimpleClusterStatePublishing() throws Exception { + MockNewClusterStateListener mockListenerA = new MockNewClusterStateListener(); + MockNode nodeA = createMockNode("nodeA", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerA); + + MockNewClusterStateListener mockListenerB = new MockNewClusterStateListener(); + MockNode nodeB = createMockNode("nodeB", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerB); + + // Initial cluster state + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + + // cluster state update - add nodeB + discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeB.discoveryNode).build(); + ClusterState previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertFalse(clusterState.wasReadFromDiff()); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - add block + previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed 
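With createMockNode, buildPublishClusterStateAction and MockDiscoveryNodesProvider in place, a publishing test is three steps: build connected mock nodes, construct the next cluster state, and publish from one node's action while the listeners verify what arrives. A hypothetical walk-through using only members defined in this class (node names are arbitrary):

// Sketch of driving the harness above; publishStateDiffAndWait is defined further down.
MockNode nodeA = createMockNode("nodeA", ImmutableSettings.EMPTY, Version.CURRENT);
MockNode nodeB = createMockNode("nodeB", ImmutableSettings.EMPTY, Version.CURRENT);

DiscoveryNodes discoveryNodes = DiscoveryNodes.builder()
        .put(nodeA.discoveryNode)
        .put(nodeB.discoveryNode)
        .localNodeId(nodeA.discoveryNode.id())
        .build();
ClusterState previous = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
ClusterState next = ClusterState.builder(previous).incrementVersion().build();

// nodeA publishes and blocks until every other node acks the new state
publishStateDiffAndWait(nodeA.action, next, previous);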
newStateProcessed) { + assertTrue(clusterState.wasReadFromDiff()); + assertThat(clusterState.blocks().global().size(), equalTo(1)); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - remove block + previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertTrue(clusterState.wasReadFromDiff()); + assertThat(clusterState.blocks().global().size(), equalTo(0)); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // Adding new node - this node should get full cluster state while nodeB should still be getting diffs + + MockNewClusterStateListener mockListenerC = new MockNewClusterStateListener(); + MockNode nodeC = createMockNode("nodeC", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerC); + + // cluster state update 3 - register node C + previousClusterState = clusterState; + discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeC.discoveryNode).build(); + clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertTrue(clusterState.wasReadFromDiff()); + assertThat(clusterState.blocks().global().size(), equalTo(0)); + } + }); + mockListenerC.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + // First state + assertFalse(clusterState.wasReadFromDiff()); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update 4 - update settings + previousClusterState = clusterState; + MetaData metaData = MetaData.builder(clusterState.metaData()).transientSettings(ImmutableSettings.settingsBuilder().put("foo", "bar").build()).build(); + clusterState = ClusterState.builder(clusterState).metaData(metaData).incrementVersion().build(); + NewClusterStateExpectation expectation = new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertTrue(clusterState.wasReadFromDiff()); + assertThat(clusterState.blocks().global().size(), equalTo(0)); + } + }; + mockListenerB.add(expectation); + mockListenerC.add(expectation); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - skipping one version change - should request full cluster state + previousClusterState = ClusterState.builder(clusterState).incrementVersion().build(); + clusterState = ClusterState.builder(clusterState).incrementVersion().build(); + expectation = new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertFalse(clusterState.wasReadFromDiff()); + } + }; + mockListenerB.add(expectation); + mockListenerC.add(expectation); + publishStateDiffAndWait(nodeA.action, clusterState, 
previousClusterState); + + // cluster state update - skipping one version change - should request full cluster state + previousClusterState = ClusterState.builder(clusterState).incrementVersion().build(); + clusterState = ClusterState.builder(clusterState).incrementVersion().build(); + expectation = new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertFalse(clusterState.wasReadFromDiff()); + } + }; + mockListenerB.add(expectation); + mockListenerC.add(expectation); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // node B becomes the master and sends a version of the cluster state that goes back + discoveryNodes = DiscoveryNodes.builder(discoveryNodes) + .put(nodeA.discoveryNode) + .put(nodeB.discoveryNode) + .put(nodeC.discoveryNode) + .build(); + previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); + expectation = new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertFalse(clusterState.wasReadFromDiff()); + } + }; + mockListenerA.add(expectation); + mockListenerC.add(expectation); + publishStateDiffAndWait(nodeB.action, clusterState, previousClusterState); + } + + @Test + @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") + public void testUnexpectedDiffPublishing() throws Exception { + + MockNode nodeA = createMockNode("nodeA", ImmutableSettings.EMPTY, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { + @Override + public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + fail("Shouldn't send cluster state to myself"); + } + }); + + MockNewClusterStateListener mockListenerB = new MockNewClusterStateListener(); + MockNode nodeB = createMockNode("nodeB", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerB); + + // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).put(nodeB.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build(); + ClusterState previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertFalse(clusterState.wasReadFromDiff()); + } + }); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - add block + previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); + mockListenerB.add(new NewClusterStateExpectation() { + @Override + public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) { + assertTrue(clusterState.wasReadFromDiff()); + } + }); + 
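The two "skipping one version change" blocks above encode the safety rule of diff publishing: a diff is only applicable to the exact state it was computed against, so a receiver that missed a version ends up with a full cluster state instead (hence wasReadFromDiff() is false). Receiver-side, the check amounts to something like this sketch; the method name and exception are illustrative, not the actual PublishClusterStateAction handshake:

// Sketch: apply an incoming diff only when the local state matches its base version.
ClusterState applyDiffOrFail(ClusterState lastSeenState, long baseVersion, Diff<ClusterState> diff) {
    if (lastSeenState != null && lastSeenState.version() == baseVersion) {
        return diff.apply(lastSeenState);   // incremental fast path
    }
    // cannot apply: the sender has to fall back to sending the full cluster state
    throw new IllegalStateException("local version ["
            + (lastSeenState == null ? "none" : lastSeenState.version())
            + "] does not match diff base version [" + baseVersion + "]");
}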
publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + } + + @Test + @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") + public void testDisablingDiffPublishing() throws Exception { + Settings noDiffPublishingSettings = ImmutableSettings.builder().put(DiscoverySettings.PUBLISH_DIFF_ENABLE, false).build(); + + MockNode nodeA = createMockNode("nodeA", noDiffPublishingSettings, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { + @Override + public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + fail("Shouldn't send cluster state to myself"); + } + }); + + MockNode nodeB = createMockNode("nodeB", noDiffPublishingSettings, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { + @Override + public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + logger.debug("Got cluster state update, version [{}], guid [{}], from diff [{}]", clusterState.version(), clusterState.uuid(), clusterState.wasReadFromDiff()); + assertFalse(clusterState.wasReadFromDiff()); + newStateProcessed.onNewClusterStateProcessed(); + } + }); + + // Initial cluster state + DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build(); + ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build(); + + // cluster state update - add nodeB + discoveryNodes = DiscoveryNodes.builder(discoveryNodes).put(nodeB.discoveryNode).build(); + ClusterState previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).nodes(discoveryNodes).incrementVersion().build(); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + + // cluster state update - add block + previousClusterState = clusterState; + clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build(); + publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState); + } + + + @Test + @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG") + public void testSimultaneousClusterStatePublishing() throws Exception { + int numberOfNodes = randomIntBetween(2, 10); + int numberOfIterations = randomIntBetween(50, 200); + Settings settings = ImmutableSettings.builder().put(DiscoverySettings.PUBLISH_TIMEOUT, "100ms").put(DiscoverySettings.PUBLISH_DIFF_ENABLE, true).build(); + MockNode[] nodes = new MockNode[numberOfNodes]; + DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder(); + for (int i = 0; i < nodes.length; i++) { + final String name = "node" + i; + nodes[i] = createMockNode(name, settings, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() { + @Override + public synchronized void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) { + assertProperMetaDataForVersion(clusterState.metaData(), clusterState.version()); + if (randomInt(10) < 2) { + // Cause timeouts from time to time + try { + Thread.sleep(randomInt(110)); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + } + } + newStateProcessed.onNewClusterStateProcessed(); + } + }); + discoveryNodesBuilder.put(nodes[i].discoveryNode); + } + + AssertingAckListener[] listeners = new AssertingAckListener[numberOfIterations]; + DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build(); + MetaData metaData = 
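testDisablingDiffPublishing pins down the kill switch, and testSimultaneousClusterStatePublishing stresses the normal path under deliberate timeouts. The sender-side decision both tests revolve around reduces to roughly the following sketch, under the assumption that diffs are only sent to nodes that already held the previous state; sendDiff and sendFullState are illustrative names:

// Sketch of the per-node choice between a full state and a diff.
// `diffsEnabled` mirrors the DiscoverySettings.PUBLISH_DIFF_ENABLE setting.
void publishTo(DiscoveryNode node, ClusterState newState, ClusterState previousState, boolean diffsEnabled) {
    if (diffsEnabled && previousState.nodes().nodeExists(node.id())) {
        sendDiff(node, newState.diff(previousState));  // node can apply an incremental update
    } else {
        sendFullState(node, newState);                 // new nodes always get the full state
    }
}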
MetaData.EMPTY_META_DATA;
+        ClusterState clusterState = ClusterState.builder(new ClusterName("test")).metaData(metaData).build();
+        ClusterState previousState;
+        for (int i = 0; i < numberOfIterations; i++) {
+            previousState = clusterState;
+            metaData = buildMetaDataForVersion(metaData, i + 1);
+            clusterState = ClusterState.builder(clusterState).incrementVersion().metaData(metaData).nodes(discoveryNodes).build();
+            listeners[i] = publishStateDiff(nodes[0].action, clusterState, previousState);
+        }
+
+        for (int i = 0; i < numberOfIterations; i++) {
+            listeners[i].await(1, TimeUnit.SECONDS);
+        }
+    }
+
+    @Test
+    @TestLogging("cluster:DEBUG,discovery.zen.publish:DEBUG")
+    public void testSerializationFailureDuringDiffPublishing() throws Exception {
+
+        MockNode nodeA = createMockNode("nodeA", ImmutableSettings.EMPTY, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() {
+            @Override
+            public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) {
+                fail("Shouldn't send cluster state to myself");
+            }
+        });
+
+        MockNewClusterStateListener mockListenerB = new MockNewClusterStateListener();
+        MockNode nodeB = createMockNode("nodeB", ImmutableSettings.EMPTY, Version.CURRENT, mockListenerB);
+
+        // Initial cluster state with both states - the second node still shouldn't get a diff even though it's present in the previous cluster state
+        DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().put(nodeA.discoveryNode).put(nodeB.discoveryNode).localNodeId(nodeA.discoveryNode.id()).build();
+        ClusterState previousClusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
+        ClusterState clusterState = ClusterState.builder(previousClusterState).incrementVersion().build();
+        mockListenerB.add(new NewClusterStateExpectation() {
+            @Override
+            public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) {
+                assertFalse(clusterState.wasReadFromDiff());
+            }
+        });
+        publishStateDiffAndWait(nodeA.action, clusterState, previousClusterState);
+
+        // cluster state update - add block
+        previousClusterState = clusterState;
+        clusterState = ClusterState.builder(clusterState).blocks(ClusterBlocks.builder().addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK)).incrementVersion().build();
+        mockListenerB.add(new NewClusterStateExpectation() {
+            @Override
+            public void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed) {
+                assertTrue(clusterState.wasReadFromDiff());
+            }
+        });
+
+        ClusterState unserializableClusterState = new ClusterState(clusterState.version(), clusterState.uuid(), clusterState) {
+            @Override
+            public Diff<ClusterState> diff(ClusterState previousState) {
+                return new Diff<ClusterState>() {
+                    @Override
+                    public ClusterState apply(ClusterState part) {
+                        fail("this diff shouldn't be applied");
+                        return part;
+                    }
+
+                    @Override
+                    public void writeTo(StreamOutput out) throws IOException {
+                        throw new IOException("Simulated failure of diff serialization");
+                    }
+                };
+            }
+        };
+        List<Tuple<DiscoveryNode, Throwable>> errors = publishStateDiff(nodeA.action, unserializableClusterState, previousClusterState).awaitErrors(1, TimeUnit.SECONDS);
+        assertThat(errors.size(), equalTo(1));
+        assertThat(errors.get(0).v2().getMessage(), containsString("Simulated failure of diff serialization"));
+    }
+
+    private MetaData buildMetaDataForVersion(MetaData metaData, long version) {
+        ImmutableOpenMap.Builder<String, IndexMetaData> indices = ImmutableOpenMap.builder(metaData.indices());
+        indices.put("test" + version, IndexMetaData.builder("test" + version).settings(ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
+                .numberOfShards((int) version).numberOfReplicas(0).build());
+        return MetaData.builder(metaData)
+                .transientSettings(ImmutableSettings.builder().put("test", version).build())
+                .indices(indices.build())
+                .build();
+    }
+
+    private void assertProperMetaDataForVersion(MetaData metaData, long version) {
+        for (long i = 1; i <= version; i++) {
+            assertThat(metaData.index("test" + i), notNullValue());
+            assertThat(metaData.index("test" + i).numberOfShards(), equalTo((int) i));
+        }
+        assertThat(metaData.index("test" + (version + 1)), nullValue());
+        assertThat(metaData.transientSettings().get("test"), equalTo(Long.toString(version)));
+    }
+
+    public void publishStateDiffAndWait(PublishClusterStateAction action, ClusterState state, ClusterState previousState) throws InterruptedException {
+        publishStateDiff(action, state, previousState).await(1, TimeUnit.SECONDS);
+    }
+
+    public AssertingAckListener publishStateDiff(PublishClusterStateAction action, ClusterState state, ClusterState previousState) throws InterruptedException {
+        AssertingAckListener assertingAckListener = new AssertingAckListener(state.nodes().getSize() - 1);
+        ClusterChangedEvent changedEvent = new ClusterChangedEvent("test update", state, previousState);
+        action.publish(changedEvent, assertingAckListener);
+        return assertingAckListener;
+    }
+
+    public static class AssertingAckListener implements Discovery.AckListener {
+        private final List<Tuple<DiscoveryNode, Throwable>> errors = new CopyOnWriteArrayList<>();
+        private final AtomicBoolean timeoutOccurred = new AtomicBoolean();
+        private final CountDownLatch countDown;
+
+        public AssertingAckListener(int nodeCount) {
+            countDown = new CountDownLatch(nodeCount);
+        }
+
+        @Override
+        public void onNodeAck(DiscoveryNode node, @Nullable Throwable t) {
+            if (t != null) {
+                errors.add(new Tuple<>(node, t));
+            }
+            countDown.countDown();
+        }
+
+        @Override
+        public void onTimeout() {
+            timeoutOccurred.set(true);
+            // Fast forward the counter - no reason to wait here
+            long currentCount = countDown.getCount();
+            for (long i = 0; i < currentCount; i++) {
+                countDown.countDown();
+            }
+        }
+
+        public void await(long timeout, TimeUnit unit) throws InterruptedException {
+            assertThat(awaitErrors(timeout, unit), emptyIterable());
+        }
+
+        public List<Tuple<DiscoveryNode, Throwable>> awaitErrors(long timeout, TimeUnit unit) throws InterruptedException {
+            countDown.await(timeout, unit);
+            assertFalse(timeoutOccurred.get());
+            return errors;
+        }
+
+    }
+
+    public interface NewClusterStateExpectation {
+        void check(ClusterState clusterState, PublishClusterStateAction.NewClusterStateListener.NewStateProcessed newStateProcessed);
+    }
+
+    public static class MockNewClusterStateListener implements PublishClusterStateAction.NewClusterStateListener {
+        CopyOnWriteArrayList<NewClusterStateExpectation> expectations = new CopyOnWriteArrayList<>();
+
+        @Override
+        public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) {
+            final NewClusterStateExpectation expectation;
+            try {
+                expectation = expectations.remove(0);
+            } catch (ArrayIndexOutOfBoundsException ex) {
+                fail("Unexpected cluster state update " + clusterState.prettyPrint());
+                return;
+            }
+            expectation.check(clusterState, newStateProcessed);
+            newStateProcessed.onNewClusterStateProcessed();
+        }
+
+        public void add(NewClusterStateExpectation expectation) {
+            expectations.add(expectation);
+        }
+    }
+
+    public static class DelegatingClusterState
extends ClusterState { + + public DelegatingClusterState(ClusterState clusterState) { + super(clusterState.version(), clusterState.uuid(), clusterState); + } + + + } + +} diff --git a/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java b/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java new file mode 100644 index 00000000000..b49b7586dc3 --- /dev/null +++ b/src/test/java/org/elasticsearch/cluster/ClusterStateDiffTests.java @@ -0,0 +1,534 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster; + +import com.carrotsearch.hppc.cursors.ObjectCursor; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.block.ClusterBlock; +import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.metadata.*; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.*; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.io.stream.BytesStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.LocalTransportAddress; +import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.index.query.FilterBuilders; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.search.warmer.IndexWarmersMetaData; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.util.List; + +import static org.elasticsearch.cluster.metadata.AliasMetaData.newAliasMetaDataBuilder; +import static org.elasticsearch.test.XContentTestUtils.convertToMap; +import static org.elasticsearch.test.XContentTestUtils.mapsEqualIgnoringArrayOrder; +import static org.elasticsearch.test.VersionUtils.randomVersion; +import static org.hamcrest.Matchers.equalTo; + + +@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE, numDataNodes = 0, numClientNodes = 0) +public class ClusterStateDiffTests extends ElasticsearchIntegrationTest { + + @Test + public void testClusterStateDiffSerialization() throws Exception { + DiscoveryNode masterNode = new DiscoveryNode("master", new LocalTransportAddress("master"), Version.CURRENT); + DiscoveryNode otherNode = new DiscoveryNode("other", new LocalTransportAddress("other"), Version.CURRENT); + DiscoveryNodes discoveryNodes = 
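The randomized round-trip test that follows exercises the contract underpinning the whole change: a diffable object can produce a diff, and a diff can be serialized and re-applied on another node. Boiled down to a toy payload, the shape looks like this (a simplified sketch of the org.elasticsearch.cluster Diff/Diffable idea, not the actual interfaces):

import java.io.IOException;
import org.elasticsearch.common.io.stream.StreamOutput;

// Toy example: ship only the delta of a long counter instead of the full value.
class CounterState {
    final long value;

    CounterState(long value) {
        this.value = value;
    }

    interface SimpleDiff<T> {
        T apply(T previousPart);                      // rebuild the new value from the old one
        void writeTo(StreamOutput out) throws IOException;
    }

    SimpleDiff<CounterState> diff(CounterState previous) {
        final long delta = value - previous.value;    // the only bytes worth sending
        return new SimpleDiff<CounterState>() {
            @Override
            public CounterState apply(CounterState previousPart) {
                return new CounterState(previousPart.value + delta);
            }

            @Override
            public void writeTo(StreamOutput out) throws IOException {
                out.writeLong(delta);
            }
        };
    }
}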
DiscoveryNodes.builder().put(masterNode).put(otherNode).localNodeId(masterNode.id()).build();
+        ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).build();
+        ClusterState clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), otherNode);
+
+        int iterationCount = randomIntBetween(10, 300);
+        for (int iteration = 0; iteration < iterationCount; iteration++) {
+            ClusterState previousClusterState = clusterState;
+            ClusterState previousClusterStateFromDiffs = clusterStateFromDiffs;
+            int changesCount = randomIntBetween(1, 4);
+            ClusterState.Builder builder = null;
+            for (int i = 0; i < changesCount; i++) {
+                if (i > 0) {
+                    clusterState = builder.build();
+                }
+                switch (randomInt(4)) {
+                    case 0:
+                        builder = randomNodes(clusterState);
+                        break;
+                    case 1:
+                        builder = randomRoutingTable(clusterState);
+                        break;
+                    case 2:
+                        builder = randomBlocks(clusterState);
+                        break;
+                    case 3:
+                    case 4:
+                        builder = randomMetaDataChanges(clusterState);
+                        break;
+                    default:
+                        throw new IllegalArgumentException("Shouldn't be here");
+                }
+            }
+            clusterState = builder.incrementVersion().build();
+
+            if (randomIntBetween(0, 10) < 1) {
+                // Update cluster state via full serialization from time to time
+                clusterStateFromDiffs = ClusterState.Builder.fromBytes(ClusterState.Builder.toBytes(clusterState), previousClusterStateFromDiffs.nodes().localNode());
+            } else {
+                // Update cluster states using diffs
+                Diff<ClusterState> diffBeforeSerialization = clusterState.diff(previousClusterState);
+                BytesStreamOutput os = new BytesStreamOutput();
+                diffBeforeSerialization.writeTo(os);
+                byte[] diffBytes = os.bytes().toBytes();
+                Diff<ClusterState> diff;
+                try (BytesStreamInput input = new BytesStreamInput(diffBytes)) {
+                    diff = previousClusterStateFromDiffs.readDiffFrom(input);
+                    clusterStateFromDiffs = diff.apply(previousClusterStateFromDiffs);
+                }
+            }
+
+            try {
+                // Check non-diffable elements
+                assertThat(clusterStateFromDiffs.version(), equalTo(clusterState.version()));
+                assertThat(clusterStateFromDiffs.uuid(), equalTo(clusterState.uuid()));
+
+                // Check nodes
+                assertThat(clusterStateFromDiffs.nodes().nodes(), equalTo(clusterState.nodes().nodes()));
+                assertThat(clusterStateFromDiffs.nodes().localNodeId(), equalTo(previousClusterStateFromDiffs.nodes().localNodeId()));
+                for (ObjectCursor<String> node : clusterStateFromDiffs.nodes().nodes().keys()) {
+                    DiscoveryNode node1 = clusterState.nodes().get(node.value);
+                    DiscoveryNode node2 = clusterStateFromDiffs.nodes().get(node.value);
+                    assertThat(node1.version(), equalTo(node2.version()));
+                    assertThat(node1.address(), equalTo(node2.address()));
+                    assertThat(node1.attributes(), equalTo(node2.attributes()));
+                }
+
+                // Check routing table
+                assertThat(clusterStateFromDiffs.routingTable().version(), equalTo(clusterState.routingTable().version()));
+                assertThat(clusterStateFromDiffs.routingTable().indicesRouting(), equalTo(clusterState.routingTable().indicesRouting()));
+
+                // Check cluster blocks
+                assertThat(clusterStateFromDiffs.blocks().global(), equalTo(clusterState.blocks().global()));
+                assertThat(clusterStateFromDiffs.blocks().indices(), equalTo(clusterState.blocks().indices()));
+                assertThat(clusterStateFromDiffs.blocks().disableStatePersistence(), equalTo(clusterState.blocks().disableStatePersistence()));
+
+                // Check metadata
+                assertThat(clusterStateFromDiffs.metaData().version(), equalTo(clusterState.metaData().version()));
+                assertThat(clusterStateFromDiffs.metaData().uuid(), equalTo(clusterState.metaData().uuid()));
+                assertThat(clusterStateFromDiffs.metaData().transientSettings(), equalTo(clusterState.metaData().transientSettings()));
+                assertThat(clusterStateFromDiffs.metaData().persistentSettings(), equalTo(clusterState.metaData().persistentSettings()));
+                assertThat(clusterStateFromDiffs.metaData().indices(), equalTo(clusterState.metaData().indices()));
+                assertThat(clusterStateFromDiffs.metaData().templates(), equalTo(clusterState.metaData().templates()));
+                assertThat(clusterStateFromDiffs.metaData().customs(), equalTo(clusterState.metaData().customs()));
+                assertThat(clusterStateFromDiffs.metaData().aliases(), equalTo(clusterState.metaData().aliases()));
+
+                // JSON Serialization test - make sure that both states produce similar JSON
+                assertThat(mapsEqualIgnoringArrayOrder(convertToMap(clusterStateFromDiffs), convertToMap(clusterState)), equalTo(true));
+
+                // Smoke test - we cannot compare bytes to bytes because some elements might get serialized in different order
+                // however, serialized size should remain the same
+                assertThat(ClusterState.Builder.toBytes(clusterStateFromDiffs).length, equalTo(ClusterState.Builder.toBytes(clusterState).length));
+            } catch (AssertionError error) {
+                logger.error("Cluster state:\n{}\nCluster state from diffs:\n{}", clusterState.toString(), clusterStateFromDiffs.toString());
+                throw error;
+            }
+        }
+
+        logger.info("Final cluster state:[{}]", clusterState.toString());
+
+    }
+
+    private ClusterState.Builder randomNodes(ClusterState clusterState) {
+        DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes());
+        List<String> nodeIds = randomSubsetOf(randomInt(clusterState.nodes().nodes().size() - 1), clusterState.nodes().nodes().keys().toArray(String.class));
+        for (String nodeId : nodeIds) {
+            if (nodeId.startsWith("node-")) {
+                if (randomBoolean()) {
+                    nodes.remove(nodeId);
+                } else {
+                    nodes.put(new DiscoveryNode(nodeId, new LocalTransportAddress(randomAsciiOfLength(10)), randomVersion(random())));
+                }
+            }
+        }
+        int additionalNodeCount = randomIntBetween(1, 20);
+        for (int i = 0; i < additionalNodeCount; i++) {
+            nodes.put(new DiscoveryNode("node-" + randomAsciiOfLength(10), new LocalTransportAddress(randomAsciiOfLength(10)), randomVersion(random())));
+        }
+        return ClusterState.builder(clusterState).nodes(nodes);
+    }
+
+    private ClusterState.Builder randomRoutingTable(ClusterState clusterState) {
+        RoutingTable.Builder builder = RoutingTable.builder(clusterState.routingTable());
+        int numberOfIndices = clusterState.routingTable().indicesRouting().size();
+        if (numberOfIndices > 0) {
+            List<String> randomIndices = randomSubsetOf(randomInt(numberOfIndices - 1), clusterState.routingTable().indicesRouting().keySet().toArray(new String[numberOfIndices]));
+            for (String index : randomIndices) {
+                if (randomBoolean()) {
+                    builder.remove(index);
+                } else {
+                    builder.add(randomIndexRoutingTable(index, clusterState.nodes().nodes().keys().toArray(String.class)));
+                }
+            }
+        }
+        int additionalIndexCount = randomIntBetween(1, 20);
+        for (int i = 0; i < additionalIndexCount; i++) {
+            builder.add(randomIndexRoutingTable("index-" + randomInt(), clusterState.nodes().nodes().keys().toArray(String.class)));
+        }
+        return ClusterState.builder(clusterState).routingTable(builder.build());
+    }
+
+    private IndexRoutingTable randomIndexRoutingTable(String index, String[] nodeIds) {
+        IndexRoutingTable.Builder builder = IndexRoutingTable.builder(index);
+        int shardCount = randomInt(10);
+
+        for (int i = 0; i < shardCount; i++) {
+            IndexShardRoutingTable.Builder indexShard = new IndexShardRoutingTable.Builder(new ShardId(index, i), randomBoolean());
+            int replicaCount = randomIntBetween(1, 10);
+            for (int j = 0; j < replicaCount; j++) {
+                indexShard.addShard(
+                        new MutableShardRouting(index, i, randomFrom(nodeIds), j == 0, ShardRoutingState.fromValue((byte) randomIntBetween(1, 4)), 1));
+            }
+            builder.addIndexShard(indexShard.build());
+        }
+        return builder.build();
+    }
+
+    private ClusterState.Builder randomBlocks(ClusterState clusterState) {
+        ClusterBlocks.Builder builder = ClusterBlocks.builder().blocks(clusterState.blocks());
+        int globalBlocksCount = clusterState.blocks().global().size();
+        if (globalBlocksCount > 0) {
+            List<ClusterBlock> blocks = randomSubsetOf(randomInt(globalBlocksCount - 1), clusterState.blocks().global().toArray(new ClusterBlock[globalBlocksCount]));
+            for (ClusterBlock block : blocks) {
+                builder.removeGlobalBlock(block);
+            }
+        }
+        int additionalGlobalBlocksCount = randomIntBetween(1, 3);
+        for (int i = 0; i < additionalGlobalBlocksCount; i++) {
+            builder.addGlobalBlock(randomGlobalBlock());
+        }
+        return ClusterState.builder(clusterState).blocks(builder);
+    }
+
+    private ClusterBlock randomGlobalBlock() {
+        switch (randomInt(2)) {
+            case 0:
+                return DiscoverySettings.NO_MASTER_BLOCK_ALL;
+            case 1:
+                return DiscoverySettings.NO_MASTER_BLOCK_WRITES;
+            default:
+                return GatewayService.STATE_NOT_RECOVERED_BLOCK;
+        }
+    }
+
+    private ClusterState.Builder randomMetaDataChanges(ClusterState clusterState) {
+        MetaData metaData = clusterState.metaData();
+        int changesCount = randomIntBetween(1, 10);
+        for (int i = 0; i < changesCount; i++) {
+            switch (randomInt(3)) {
+                case 0:
+                    metaData = randomMetaDataSettings(metaData);
+                    break;
+                case 1:
+                    metaData = randomIndices(metaData);
+                    break;
+                case 2:
+                    metaData = randomTemplates(metaData);
+                    break;
+                case 3:
+                    metaData = randomMetaDataCustoms(metaData);
+                    break;
+                default:
+                    throw new IllegalArgumentException("Shouldn't be here");
+            }
+        }
+        return ClusterState.builder(clusterState).metaData(MetaData.builder(metaData).version(metaData.version() + 1).build());
+    }
+
+    private Settings randomSettings(Settings settings) {
+        ImmutableSettings.Builder builder = ImmutableSettings.builder();
+        if (randomBoolean()) {
+            builder.put(settings);
+        }
+        int settingsCount = randomInt(10);
+        for (int i = 0; i < settingsCount; i++) {
+            builder.put(randomAsciiOfLength(10), randomAsciiOfLength(10));
+        }
+        return builder.build();
+
+    }
+
+    private MetaData randomMetaDataSettings(MetaData metaData) {
+        if (randomBoolean()) {
+            return MetaData.builder(metaData).persistentSettings(randomSettings(metaData.persistentSettings())).build();
+        } else {
+            return MetaData.builder(metaData).transientSettings(randomSettings(metaData.transientSettings())).build();
+        }
+    }
+
+    private interface RandomPart<T> {
+        /**
+         * Returns list of parts from metadata
+         */
+        ImmutableOpenMap<String, T> parts(MetaData metaData);
+
+        /**
+         * Puts the part back into metadata
+         */
+        MetaData.Builder put(MetaData.Builder builder, T part);
+
+        /**
+         * Remove the part from metadata
+         */
+        MetaData.Builder remove(MetaData.Builder builder, String name);
+
+        /**
+         * Returns a random part with the specified name
+         */
+        T randomCreate(String name);
+
+        /**
+         * Makes random modifications to the part
+         */
+        T randomChange(T part);
+
+    }
+
+    private <T> MetaData randomParts(MetaData metaData, String prefix, RandomPart<T> randomPart) {
+        MetaData.Builder builder = MetaData.builder(metaData);
+        ImmutableOpenMap<String, T> parts = randomPart.parts(metaData);
+        int partCount = parts.size();
+        if (partCount > 0) {
+            List<String> randomParts = randomSubsetOf(randomInt(partCount - 1), randomPart.parts(metaData).keys().toArray(String.class));
+            for (String part : randomParts) {
+                if (randomBoolean()) {
+                    randomPart.remove(builder, part);
+                } else {
+                    randomPart.put(builder, randomPart.randomChange(parts.get(part)));
+                }
+            }
+        }
+        int additionalPartCount = randomIntBetween(1, 20);
+        for (int i = 0; i < additionalPartCount; i++) {
+            String name = randomName(prefix);
+            randomPart.put(builder, randomPart.randomCreate(name));
+        }
+        return builder.build();
+    }
+
+    private MetaData randomIndices(MetaData metaData) {
+        return randomParts(metaData, "index", new RandomPart<IndexMetaData>() {
+
+            @Override
+            public ImmutableOpenMap<String, IndexMetaData> parts(MetaData metaData) {
+                return metaData.indices();
+            }
+
+            @Override
+            public MetaData.Builder put(MetaData.Builder builder, IndexMetaData part) {
+                return builder.put(part, true);
+            }
+
+            @Override
+            public MetaData.Builder remove(MetaData.Builder builder, String name) {
+                return builder.remove(name);
+            }
+
+            @Override
+            public IndexMetaData randomCreate(String name) {
+                IndexMetaData.Builder builder = IndexMetaData.builder(name);
+                ImmutableSettings.Builder settingsBuilder = ImmutableSettings.builder();
+                setRandomSettings(getRandom(), settingsBuilder);
+                settingsBuilder.put(randomSettings(ImmutableSettings.EMPTY)).put(IndexMetaData.SETTING_VERSION_CREATED, randomVersion(random()));
+                builder.settings(settingsBuilder);
+                builder.numberOfShards(randomIntBetween(1, 10)).numberOfReplicas(randomInt(10));
+                int aliasCount = randomInt(10);
+                if (randomBoolean()) {
+                    builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers());
+                }
+                for (int i = 0; i < aliasCount; i++) {
+                    builder.putAlias(randomAlias());
+                }
+                return builder.build();
+            }
+
+            @Override
+            public IndexMetaData randomChange(IndexMetaData part) {
+                IndexMetaData.Builder builder = IndexMetaData.builder(part);
+                switch (randomIntBetween(0, 3)) {
+                    case 0:
+                        builder.settings(ImmutableSettings.builder().put(part.settings()).put(randomSettings(ImmutableSettings.EMPTY)));
+                        break;
+                    case 1:
+                        if (randomBoolean() && part.aliases().isEmpty() == false) {
+                            builder.removeAlias(randomFrom(part.aliases().keys().toArray(String.class)));
+                        } else {
+                            builder.putAlias(AliasMetaData.builder(randomAsciiOfLength(10)));
+                        }
+                        break;
+                    case 2:
+                        builder.settings(ImmutableSettings.builder().put(part.settings()).put(IndexMetaData.SETTING_UUID, Strings.randomBase64UUID()));
+                        break;
+                    case 3:
+                        builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers());
+                        break;
+                    default:
+                        throw new IllegalArgumentException("Shouldn't be here");
+                }
+                return builder.build();
+            }
+        });
+    }
+
+    private IndexWarmersMetaData randomWarmers() {
+        if (randomBoolean()) {
+            return new IndexWarmersMetaData(
+                    new IndexWarmersMetaData.Entry(
+                            randomName("warm"),
+                            new String[]{randomName("type")},
+                            randomBoolean(),
+                            new BytesArray(randomAsciiOfLength(1000)))
+            );
+        } else {
+            return new IndexWarmersMetaData();
+        }
+    }
+
+    private MetaData randomTemplates(MetaData metaData) {
+        return randomParts(metaData, "template", new RandomPart<IndexTemplateMetaData>() {
+            @Override
+            public ImmutableOpenMap<String, IndexTemplateMetaData> parts(MetaData metaData) {
+                return metaData.templates();
+            }
+
+            @Override
+            public MetaData.Builder put(MetaData.Builder builder, IndexTemplateMetaData part) {
+                return builder.put(part);
+            }
+
+            @Override
+            public MetaData.Builder remove(MetaData.Builder builder, String name) {
+                return builder.removeTemplate(name);
+            }
+
+            @Override
+            public IndexTemplateMetaData randomCreate(String name) {
+                IndexTemplateMetaData.Builder builder = IndexTemplateMetaData.builder(name);
+                builder.order(randomInt(1000))
+                        .template(randomName("temp"))
+                        .settings(randomSettings(ImmutableSettings.EMPTY));
+                int aliasCount = randomIntBetween(0, 10);
+                for (int i = 0; i < aliasCount; i++) {
+                    builder.putAlias(randomAlias());
+                }
+                if (randomBoolean()) {
+                    builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers());
+                }
+                return builder.build();
+            }
+
+            @Override
+            public IndexTemplateMetaData randomChange(IndexTemplateMetaData part) {
+                IndexTemplateMetaData.Builder builder = new IndexTemplateMetaData.Builder(part);
+                builder.order(randomInt(1000));
+                return builder.build();
+            }
+        });
+    }
+
+    private AliasMetaData randomAlias() {
+        AliasMetaData.Builder builder = newAliasMetaDataBuilder(randomName("alias"));
+        if (randomBoolean()) {
+            builder.filter(FilterBuilders.termFilter("test", randomRealisticUnicodeOfCodepointLength(10)).toString());
+        }
+        if (randomBoolean()) {
+            builder.routing(randomAsciiOfLength(10));
+        }
+        return builder.build();
+    }
+
+    private MetaData randomMetaDataCustoms(final MetaData metaData) {
+        return randomParts(metaData, "custom", new RandomPart<MetaData.Custom>() {
+
+            @Override
+            public ImmutableOpenMap<String, MetaData.Custom> parts(MetaData metaData) {
+                return metaData.customs();
+            }
+
+            @Override
+            public MetaData.Builder put(MetaData.Builder builder, MetaData.Custom part) {
+                if (part instanceof SnapshotMetaData) {
+                    return builder.putCustom(SnapshotMetaData.TYPE, part);
+                } else if (part instanceof RepositoriesMetaData) {
+                    return builder.putCustom(RepositoriesMetaData.TYPE, part);
+                } else if (part instanceof RestoreMetaData) {
+                    return builder.putCustom(RestoreMetaData.TYPE, part);
+                }
+                throw new IllegalArgumentException("Unknown custom part " + part);
+            }
+
+            @Override
+            public MetaData.Builder remove(MetaData.Builder builder, String name) {
+                return builder.removeCustom(name);
+            }
+
+            @Override
+            public MetaData.Custom randomCreate(String name) {
+                switch (randomIntBetween(0, 2)) {
+                    case 0:
+                        return new SnapshotMetaData(new SnapshotMetaData.Entry(
+                                new SnapshotId(randomName("repo"), randomName("snap")),
+                                randomBoolean(),
+                                SnapshotMetaData.State.fromValue((byte) randomIntBetween(0, 6)),
+                                ImmutableList.<String>of(),
+                                Math.abs(randomLong()),
+                                ImmutableMap.<ShardId, SnapshotMetaData.ShardSnapshotStatus>of()));
+                    case 1:
+                        return new RepositoriesMetaData();
+                    case 2:
+                        return new RestoreMetaData(new RestoreMetaData.Entry(
+                                new SnapshotId(randomName("repo"), randomName("snap")),
+                                RestoreMetaData.State.fromValue((byte) randomIntBetween(0, 3)),
+                                ImmutableList.<String>of(),
+                                ImmutableMap.<ShardId, RestoreMetaData.ShardRestoreStatus>of()));
+                    default:
+                        throw new IllegalArgumentException("Shouldn't be here");
+                }
+            }
+
+            @Override
+            public MetaData.Custom randomChange(MetaData.Custom part) {
+                return part;
+            }
+        });
+    }
+
+    private String randomName(String prefix) {
+        return prefix + Strings.randomBase64UUID(getRandom());
+    }
+}
\ No newline at end of file
diff --git a/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java b/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java
index 16c49fb4c37..55b4a932c3a 100644
--- a/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java
+++ b/src/test/java/org/elasticsearch/cluster/UpdateSettingsValidationTests.java
@@ -19,7 +19,6 @@ package org.elasticsearch.cluster;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import
org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.common.Priority; import org.elasticsearch.test.ElasticsearchIntegrationTest; @@ -63,7 +62,7 @@ public class UpdateSettingsValidationTests extends ElasticsearchIntegrationTest try { client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put("index.refresh_interval", "")).execute().actionGet(); fail(); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { logger.info("Error message: [{}]", ex.getMessage()); } } diff --git a/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java b/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java index 3305469c780..717aa891c6c 100644 --- a/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java +++ b/src/test/java/org/elasticsearch/cluster/ack/AckClusterUpdateSettingsTests.java @@ -42,7 +42,7 @@ import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.TEST; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; -@ClusterScope(scope = TEST) +@ClusterScope(scope = TEST, minNumDataNodes = 2) public class AckClusterUpdateSettingsTests extends ElasticsearchIntegrationTest { @Override diff --git a/src/test/java/org/elasticsearch/cluster/ack/AckTests.java b/src/test/java/org/elasticsearch/cluster/ack/AckTests.java index e1da078e3eb..00724aada21 100644 --- a/src/test/java/org/elasticsearch/cluster/ack/AckTests.java +++ b/src/test/java/org/elasticsearch/cluster/ack/AckTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsResponse; import org.elasticsearch.action.admin.indices.warmer.delete.DeleteWarmerResponse; @@ -52,11 +51,10 @@ import org.junit.Test; import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; -import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.SUITE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.*; -@ClusterScope(scope = SUITE) +@ClusterScope(minNumDataNodes = 2) public class AckTests extends ElasticsearchIntegrationTest { @Override diff --git a/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java b/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java index 2ffd88b978e..b01e16f1ca8 100644 --- a/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java +++ b/src/test/java/org/elasticsearch/cluster/allocation/AwarenessAllocationTests.java @@ -47,7 +47,7 @@ import static org.hamcrest.Matchers.equalTo; /** */ -@ClusterScope(scope= ElasticsearchIntegrationTest.Scope.TEST, numDataNodes =0) +@ClusterScope(scope= ElasticsearchIntegrationTest.Scope.TEST, numDataNodes =0, minNumDataNodes = 2) public 
class AwarenessAllocationTests extends ElasticsearchIntegrationTest { private final ESLogger logger = Loggers.getLogger(AwarenessAllocationTests.class); diff --git a/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java b/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java index ffe9e4ea86a..69098eeafde 100644 --- a/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java +++ b/src/test/java/org/elasticsearch/cluster/allocation/ClusterRerouteTests.java @@ -24,6 +24,8 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.RerouteExplanation; import org.elasticsearch.cluster.routing.allocation.RoutingExplanations; @@ -31,7 +33,7 @@ import org.elasticsearch.cluster.routing.allocation.command.AllocateAllocationCo import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider; -import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.Allocation; import org.elasticsearch.common.Priority; import org.elasticsearch.common.io.FileSystemUtils; import org.elasticsearch.common.logging.ESLogger; @@ -47,9 +49,14 @@ import java.nio.file.Path; import java.util.Arrays; import java.util.List; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; /** */ @@ -70,7 +77,7 @@ public class ClusterRerouteTests extends ElasticsearchIntegrationTest { @Test public void rerouteWithCommands_enableAllocationSettings() throws Exception { Settings commonSettings = settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) .build(); rerouteWithCommands(commonSettings); } @@ -148,7 +155,7 @@ public class ClusterRerouteTests extends ElasticsearchIntegrationTest { @Test public void rerouteWithAllocateLocalGateway_enableAllocationSettings() throws Exception { Settings commonSettings = settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) .build(); rerouteWithAllocateLocalGateway(commonSettings); } @@ -241,7 +248,7 @@ public class ClusterRerouteTests extends ElasticsearchIntegrationTest { logger.info("--> disable 
allocation"); Settings newSettings = settingsBuilder() - .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE, EnableAllocationDecider.Allocation.NONE.name()) + .put(CLUSTER_ROUTING_ALLOCATION_ENABLE, Allocation.NONE.name()) .build(); client().admin().cluster().prepareUpdateSettings().setTransientSettings(newSettings).execute().actionGet(); @@ -264,4 +271,51 @@ public class ClusterRerouteTests extends ElasticsearchIntegrationTest { assertThat(explanation.decisions().type(), equalTo(Decision.Type.YES)); } + @Test + public void testClusterRerouteWithBlocks() throws Exception { + List nodesIds = internalCluster().startNodesAsync(2).get(); + + logger.info("--> create an index with 1 shard and 0 replicas"); + assertAcked(prepareCreate("test-blocks").setSettings(settingsBuilder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))); + ensureGreen("test-blocks"); + + logger.info("--> check that the index has 1 shard"); + ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState(); + List shards = state.routingTable().allShards("test-blocks"); + assertThat(shards, hasSize(1)); + + logger.info("--> check that the shard is allocated"); + ShardRouting shard = shards.get(0); + assertThat(shard.assignedToNode(), equalTo(true)); + + logger.info("--> retrieve the node where the shard is allocated"); + DiscoveryNode node = state.nodes().resolveNode(shard.currentNodeId()); + assertNotNull(node); + + // toggle is used to mve the shard from one node to another + int toggle = nodesIds.indexOf(node.getName()); + + // Rerouting shards is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test-blocks", blockSetting); + assertAcked(client().admin().cluster().prepareReroute() + .add(new MoveAllocationCommand(new ShardId("test-blocks", 0), nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2)))); + + ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForYellowStatus().setWaitForRelocatingShards(0).execute().actionGet(); + assertThat(healthResponse.isTimedOut(), equalTo(false)); + } finally { + disableIndexBlock("test-blocks", blockSetting); + } + } + + // Rerouting shards is blocked when the cluster is read only + try { + setClusterReadOnly(true); + assertBlocked(client().admin().cluster().prepareReroute() + .add(new MoveAllocationCommand(new ShardId("test-blocks", 1), nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2)))); + } finally { + setClusterReadOnly(false); + } + } } \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java b/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java new file mode 100644 index 00000000000..efccb3f778c --- /dev/null +++ b/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ + // Rerouting shards is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test-blocks", blockSetting); + assertAcked(client().admin().cluster().prepareReroute() + .add(new MoveAllocationCommand(new ShardId("test-blocks", 0), nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2)))); + + ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForYellowStatus().setWaitForRelocatingShards(0).execute().actionGet(); + assertThat(healthResponse.isTimedOut(), equalTo(false)); + } finally { + disableIndexBlock("test-blocks", blockSetting); + } + } + + // Rerouting shards is blocked when the cluster is read only + try { + setClusterReadOnly(true); + assertBlocked(client().admin().cluster().prepareReroute() + .add(new MoveAllocationCommand(new ShardId("test-blocks", 1), nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2)))); + } finally { + setClusterReadOnly(false); + } + } } \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java b/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java new file mode 100644 index 00000000000..efccb3f778c --- /dev/null +++ b/src/test/java/org/elasticsearch/cluster/block/ClusterBlockTests.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.block; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.BytesStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.junit.Test; + +import java.util.EnumSet; + +import static org.elasticsearch.test.VersionUtils.randomVersion; +import static org.hamcrest.CoreMatchers.equalTo; + +public class ClusterBlockTests extends ElasticsearchTestCase { + + @Test + public void testSerialization() throws Exception { + int iterations = randomIntBetween(10, 100); + for (int i = 0; i < iterations; i++) { + // Get a random version + Version version = randomVersion(random()); + + // Get a random list of ClusterBlockLevels + EnumSet<ClusterBlockLevel> levels = EnumSet.noneOf(ClusterBlockLevel.class); + int nbLevels = randomIntBetween(1, ClusterBlockLevel.values().length); + for (int j = 0; j < nbLevels; j++) { + levels.add(randomFrom(ClusterBlockLevel.values())); + } + + ClusterBlock clusterBlock = new ClusterBlock(randomInt(), "cluster block #" + randomInt(), randomBoolean(), + randomBoolean(), randomFrom(RestStatus.values()), levels); + + BytesStreamOutput out = new BytesStreamOutput(); + out.setVersion(version); + clusterBlock.writeTo(out); + + BytesStreamInput in = new BytesStreamInput(out.bytes()); + in.setVersion(version); + ClusterBlock result = ClusterBlock.readClusterBlock(in); + + assertThat(result.id(), equalTo(clusterBlock.id())); + assertThat(result.status(), equalTo(clusterBlock.status())); + assertThat(result.description(), equalTo(clusterBlock.description())); + assertThat(result.retryable(), equalTo(clusterBlock.retryable())); + assertThat(result.disableStatePersistence(), equalTo(clusterBlock.disableStatePersistence())); + assertArrayEquals(result.levels().toArray(), clusterBlock.levels().toArray()); + } + } +} diff --git a/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java b/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java index e63927c7933..8576e0f19b0 100644 --- a/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java +++ b/src/test/java/org/elasticsearch/cluster/metadata/MetaDataTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.metadata; import com.google.common.collect.Sets; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.metadata.IndexMetaData.State; @@ -426,14 +425,14 @@ public class MetaDataTests extends ElasticsearchTestCase { try { md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "foofoobar"); fail(); - } catch(ElasticsearchIllegalArgumentException e) { + } catch(IllegalArgumentException e) { assertThat(e.getMessage(), containsString("Alias [foofoobar] has more than one indices associated with it")); } try { md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "foo", "foofoobar"); fail(); - } catch(ElasticsearchIllegalArgumentException e) 
{ + } catch(IllegalArgumentException e) { assertThat(e.getMessage(), containsString("Alias [foofoobar] has more than one indices associated with it")); } @@ -441,7 +440,8 @@ public class MetaDataTests extends ElasticsearchTestCase { md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "foofoo-closed", "foofoobar"); fail(); } catch(IndexClosedException e) { - assertThat(e.getMessage(), containsString("[foofoo-closed] closed")); + assertThat(e.getMessage(), equalTo("closed")); + assertEquals(e.index().getName(), "foofoo-closed"); } String[] results = md.concreteIndices(IndicesOptions.strictSingleIndexNoExpandForbidClosed(), "foo", "barbaz"); @@ -655,7 +655,7 @@ public class MetaDataTests extends ElasticsearchTestCase { try { metadata.concreteIndices(indicesOptions, allIndices); fail("no wildcard expansion and null or empty list argument should trigger ElasticsearchIllegalArgumentException"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { // expected } } else { diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java index 2c04b470ff6..7a1ffaecff6 100644 --- a/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.cluster.routing.allocation; import com.google.common.collect.ImmutableMap; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -129,14 +128,14 @@ public class AllocationCommandsTests extends ElasticsearchAllocationTestCase { try { allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node1", false))); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } logger.info("--> allocating to non-data node, should fail"); try { rerouteResult = allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node4", true))); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } logger.info("--> allocating with primary flag set to true"); @@ -158,7 +157,7 @@ public class AllocationCommandsTests extends ElasticsearchAllocationTestCase { try { allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node1", false))); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } logger.info("--> allocate the replica shard on on the second node"); @@ -183,7 +182,7 @@ public class AllocationCommandsTests extends ElasticsearchAllocationTestCase { try { allocation.reroute(clusterState, new AllocationCommands(new AllocateAllocationCommand(new ShardId("test", 0), "node3", false))); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } } @@ -225,7 +224,7 @@ public class AllocationCommandsTests extends ElasticsearchAllocationTestCase { try { allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", false))); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch 
(IllegalArgumentException e) { } logger.info("--> start the primary shard"); @@ -239,7 +238,7 @@ public class AllocationCommandsTests extends ElasticsearchAllocationTestCase { try { allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", false))); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } logger.info("--> allocate the replica shard on on the second node"); @@ -273,7 +272,7 @@ public class AllocationCommandsTests extends ElasticsearchAllocationTestCase { try { allocation.reroute(clusterState, new AllocationCommands(new CancelAllocationCommand(new ShardId("test", 0), "node1", false))); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } logger.info("--> start the replica shard"); diff --git a/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java b/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java index c298067b250..3eb177643cd 100644 --- a/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java +++ b/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDeciderTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.cluster.routing.allocation.decider; import com.google.common.base.Predicate; import com.google.common.collect.ImmutableMap; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterInfoService; @@ -44,7 +43,6 @@ import org.elasticsearch.test.gateway.NoopGatewayAllocator; import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ElasticsearchAllocationTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.Test; import java.util.Arrays; @@ -787,7 +785,7 @@ public class DiskThresholdDeciderTests extends ElasticsearchAllocationTestCase { // throw an exception about not being able to complete strategy.reroute(clusterState, cmds).routingTable(); fail("should not have been able to reroute the shard"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat("can't allocated because there isn't enough room: " + e.getMessage(), e.getMessage().contains("less than required [30.0%] free disk on node, free: [26.0%]"), equalTo(true)); } diff --git a/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java index cbbff463f20..83a27850591 100644 --- a/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java +++ b/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java @@ -81,7 +81,7 @@ public class ClusterSerializationTests extends ElasticsearchAllocationTestCase { RoutingTable source = strategy.reroute(clusterState).routingTable(); BytesStreamOutput outStream = new BytesStreamOutput(); - RoutingTable.Builder.writeTo(source, outStream); + source.writeTo(outStream); BytesStreamInput inStream = new BytesStreamInput(outStream.bytes().toBytes()); RoutingTable target = RoutingTable.Builder.readFrom(inStream); diff --git a/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java 
b/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java new file mode 100644 index 00000000000..d87d900a0e8 --- /dev/null +++ b/src/test/java/org/elasticsearch/cluster/serialization/DiffableTests.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.serialization; + +import com.google.common.collect.ImmutableMap; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.DiffableUtils; +import org.elasticsearch.cluster.DiffableUtils.KeyedReader; +import org.elasticsearch.cluster.AbstractDiffable; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.io.stream.*; +import org.elasticsearch.test.ElasticsearchTestCase; +import org.junit.Test; + +import java.io.IOException; +import java.util.Map; + +import static com.google.common.collect.Maps.newHashMap; +import static org.hamcrest.CoreMatchers.equalTo; + +public class DiffableTests extends ElasticsearchTestCase { + + @Test + public void testImmutableMapDiff() throws IOException { + ImmutableMap.Builder<String, TestDiffable> builder = ImmutableMap.builder(); + builder.put("foo", new TestDiffable("1")); + builder.put("bar", new TestDiffable("2")); + builder.put("baz", new TestDiffable("3")); + ImmutableMap<String, TestDiffable> before = builder.build(); + Map<String, TestDiffable> map = newHashMap(); + map.putAll(before); + map.remove("bar"); + map.put("baz", new TestDiffable("4")); + map.put("new", new TestDiffable("5")); + ImmutableMap<String, TestDiffable> after = ImmutableMap.copyOf(map); + Diff<ImmutableMap<String, TestDiffable>> diff = DiffableUtils.diff(before, after); + BytesStreamOutput out = new BytesStreamOutput(); + diff.writeTo(out); + BytesStreamInput in = new BytesStreamInput(out.bytes()); + ImmutableMap<String, TestDiffable> serialized = DiffableUtils.readImmutableMapDiff(in, TestDiffable.PROTO).apply(before); + assertThat(serialized.size(), equalTo(3)); + assertThat(serialized.get("foo").value(), equalTo("1")); + assertThat(serialized.get("baz").value(), equalTo("4")); + assertThat(serialized.get("new").value(), equalTo("5")); + }
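+ // Same round-trip for ImmutableOpenMap: build two versions of the map, diff them, serialize the diff, and apply it back to the original.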
+ + @Test + public void testImmutableOpenMapDiff() throws IOException { + ImmutableOpenMap.Builder<String, TestDiffable> builder = ImmutableOpenMap.builder(); + builder.put("foo", new TestDiffable("1")); + builder.put("bar", new TestDiffable("2")); + builder.put("baz", new TestDiffable("3")); + ImmutableOpenMap<String, TestDiffable> before = builder.build(); + builder = ImmutableOpenMap.builder(before); + builder.remove("bar"); + builder.put("baz", new TestDiffable("4")); + builder.put("new", new TestDiffable("5")); + ImmutableOpenMap<String, TestDiffable> after = builder.build(); + Diff<ImmutableOpenMap<String, TestDiffable>> diff = DiffableUtils.diff(before, after); + BytesStreamOutput out = new BytesStreamOutput(); + diff.writeTo(out); + BytesStreamInput in = new BytesStreamInput(out.bytes()); + ImmutableOpenMap<String, TestDiffable> serialized = DiffableUtils.readImmutableOpenMapDiff(in, new KeyedReader<TestDiffable>() { + @Override + public TestDiffable readFrom(StreamInput in, String key) throws IOException { + return new TestDiffable(in.readString()); + } + + @Override + public Diff<TestDiffable> readDiffFrom(StreamInput in, String key) throws IOException { + return AbstractDiffable.readDiffFrom(new StreamableReader<TestDiffable>() { + @Override + public TestDiffable readFrom(StreamInput in) throws IOException { + return new TestDiffable(in.readString()); + } + }, in); + } + }).apply(before); + assertThat(serialized.size(), equalTo(3)); + assertThat(serialized.get("foo").value(), equalTo("1")); + assertThat(serialized.get("baz").value(), equalTo("4")); + assertThat(serialized.get("new").value(), equalTo("5")); + + } + public static class TestDiffable extends AbstractDiffable<TestDiffable> { + + public static final TestDiffable PROTO = new TestDiffable(""); + + private final String value; + + public TestDiffable(String value) { + this.value = value; + } + + public String value() { + return value; + } + + @Override + public TestDiffable readFrom(StreamInput in) throws IOException { + return new TestDiffable(in.readString()); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(value); + } + } + +} diff --git a/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java b/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java index 9947c1a12b8..3aae8923ff5 100644 --- a/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java +++ b/src/test/java/org/elasticsearch/cluster/settings/ClusterSettingsTests.java @@ -19,18 +19,23 @@ package org.elasticsearch.cluster.settings; +import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequestBuilder; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.allocation.decider.DisableAllocationDecider; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.discovery.DiscoverySettings; +import org.elasticsearch.indices.store.IndicesStore; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.hamcrest.Matchers; import org.junit.Test; +import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.TEST; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.hamcrest.Matchers.*; @ClusterScope(scope = TEST) @@ -52,7 +57,7 @@ public class ClusterSettingsTests extends ElasticsearchIntegrationTest { @Test public void clusterSettingsUpdateResponse() { - String key1 = "indices.cache.filter.size"; + String key1 = IndicesStore.INDICES_STORE_THROTTLE_MAX_BYTES_PER_SEC; int value1 = 10; String key2 = DisableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_DISABLE_ALLOCATION; @@ -141,4 +146,39 @@ public class ClusterSettingsTests extends ElasticsearchIntegrationTest { assertThat(response.getTransientSettings().getAsMap().entrySet(), Matchers.emptyIterable()); assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1l)); } + + @Test + public void testClusterUpdateSettingsWithBlocks() { + String key1 = "cluster.routing.allocation.enable"; + Settings transientSettings = 
ImmutableSettings.builder().put(key1, false).build(); + + String key2 = "cluster.routing.allocation.node_concurrent_recoveries"; + Settings persistentSettings = ImmutableSettings.builder().put(key2, "5").build(); + + ClusterUpdateSettingsRequestBuilder request = client().admin().cluster().prepareUpdateSettings() + .setTransientSettings(transientSettings) + .setPersistentSettings(persistentSettings); + + // Cluster settings updates are blocked when the cluster is read only + try { + setClusterReadOnly(true); + assertBlocked(request, MetaData.CLUSTER_READ_ONLY_BLOCK); + + // But it's possible to update the settings to update the "cluster.blocks.read_only" setting + Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY, false).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get()); + + } finally { + setClusterReadOnly(false); + } + + // It should work now + ClusterUpdateSettingsResponse response = request.execute().actionGet(); + + assertAcked(response); + assertThat(response.getTransientSettings().get(key1), notNullValue()); + assertThat(response.getTransientSettings().get(key2), nullValue()); + assertThat(response.getPersistentSettings().get(key1), nullValue()); + assertThat(response.getPersistentSettings().get(key2), notNullValue()); + } } diff --git a/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java b/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java index 596d8289d17..f337bf5b67c 100644 --- a/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java +++ b/src/test/java/org/elasticsearch/cluster/shards/ClusterSearchShardsTests.java @@ -27,13 +27,17 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import org.junit.Test; +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; -import static org.elasticsearch.test.ElasticsearchIntegrationTest.*; +import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.hamcrest.Matchers.equalTo; /** */ -@ClusterScope(scope= Scope.SUITE, numDataNodes =2) +@ClusterScope(scope= Scope.SUITE, numDataNodes = 2) public class ClusterSearchShardsTests extends ElasticsearchIntegrationTest { @Override @@ -123,4 +127,36 @@ public class ClusterSearchShardsTests extends ElasticsearchIntegrationTest { assertThat(seenTest2, equalTo(true)); assertThat(response.getNodes().length, equalTo(2)); } + + @Test + public void testClusterSearchShardsWithBlocks() { + createIndex("test-blocks"); + + NumShards numShards = getNumShards("test-blocks"); + + int docs = between(10, 100); + for (int i = 0; i < docs; i++) { + client().prepareIndex("test-blocks", "type", "" + i).setSource("test", "init").execute().actionGet(); + } + ensureGreen("test-blocks"); + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("test-blocks", blockSetting); + ClusterSearchShardsResponse response = client().admin().cluster().prepareSearchShards("test-blocks").execute().actionGet(); + assertThat(response.getGroups().length, equalTo(numShards.numPrimaries)); + } finally { + disableIndexBlock("test-blocks", blockSetting); + } + } + + // 
Request is blocked + try { + enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().cluster().prepareSearchShards("test-blocks")); + } finally { + disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); + } + } } diff --git a/src/test/java/org/elasticsearch/common/BooleansTests.java b/src/test/java/org/elasticsearch/common/BooleansTests.java index 24058ac5438..3bacaed7f5a 100644 --- a/src/test/java/org/elasticsearch/common/BooleansTests.java +++ b/src/test/java/org/elasticsearch/common/BooleansTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.common; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.hamcrest.Matchers; import org.junit.Test; @@ -78,7 +77,7 @@ public class BooleansTests extends ElasticsearchTestCase { Booleans.parseBooleanExact(randomFrom(null, "fred", "foo", "barney")); fail("Expected exception while parsing invalid boolean value "); } catch (Exception ex) { - assertTrue(ex instanceof ElasticsearchIllegalArgumentException); + assertTrue(ex instanceof IllegalArgumentException); } } diff --git a/src/test/java/org/elasticsearch/common/ParseFieldTests.java b/src/test/java/org/elasticsearch/common/ParseFieldTests.java index ef4d2e093f6..7b0dacf8500 100644 --- a/src/test/java/org/elasticsearch/common/ParseFieldTests.java +++ b/src/test/java/org/elasticsearch/common/ParseFieldTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.common; import org.apache.commons.lang3.ArrayUtils; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; @@ -58,14 +57,14 @@ public class ParseFieldTests extends ElasticsearchTestCase { try { withDeprecations.match(randomFrom(deprecated), flags); fail(); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { } try { withDeprecations.match("barFoo", flags); fail(); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { } } @@ -96,7 +95,7 @@ public class ParseFieldTests extends ElasticsearchTestCase { try { field.match(randomFrom(allValues), flags); fail(); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { } } } diff --git a/src/test/java/org/elasticsearch/common/PidFileTests.java b/src/test/java/org/elasticsearch/common/PidFileTests.java index 0c66b411c60..02f92f5b979 100644 --- a/src/test/java/org/elasticsearch/common/PidFileTests.java +++ b/src/test/java/org/elasticsearch/common/PidFileTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.common; import com.google.common.base.Charsets; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; @@ -35,7 +34,7 @@ import java.nio.file.StandardOpenOption; */ public class PidFileTests extends ElasticsearchTestCase { - @Test(expected = ElasticsearchIllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testParentIsFile() throws IOException { Path dir = createTempDir(); Path parent = dir.resolve("foo"); diff --git a/src/test/java/org/elasticsearch/common/TableTests.java b/src/test/java/org/elasticsearch/common/TableTests.java index 919e1c4dd51..032299cd1b2 100644 --- a/src/test/java/org/elasticsearch/common/TableTests.java +++ b/src/test/java/org/elasticsearch/common/TableTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.common; -import 
org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; @@ -28,37 +27,37 @@ import java.util.Map; public class TableTests extends ElasticsearchTestCase { - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailOnStartRowWithoutHeader() { Table table = new Table(); table.startRow(); } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailOnEndHeadersWithoutStart() { Table table = new Table(); table.endHeaders(); } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailOnAddCellWithoutHeader() { Table table = new Table(); table.addCell("error"); } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailOnAddCellWithoutRow() { Table table = this.getTableWithHeaders(); table.addCell("error"); } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailOnEndRowWithoutStart() { Table table = this.getTableWithHeaders(); table.endRow(); } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailOnLessCellsThanDeclared() { Table table = this.getTableWithHeaders(); table.startRow(); @@ -74,7 +73,7 @@ public class TableTests extends ElasticsearchTestCase { table.endRow(false); } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailOnMoreCellsThanDeclared() { Table table = this.getTableWithHeaders(); table.startRow(); diff --git a/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTest.java b/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTest.java index 586d78ce538..1ffbb8d9a0d 100644 --- a/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTest.java +++ b/src/test/java/org/elasticsearch/common/bytes/PagedBytesReferenceTest.java @@ -54,7 +54,7 @@ public class PagedBytesReferenceTest extends ElasticsearchTestCase { @Before public void setUp() throws Exception { super.setUp(); - bigarrays = new BigArrays(ImmutableSettings.EMPTY, null, new NoneCircuitBreakerService()); + bigarrays = new BigArrays(null, new NoneCircuitBreakerService()); } @Override diff --git a/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java b/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java index 077970941f3..f86f4cf8556 100644 --- a/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java +++ b/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashMapTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.collect; import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.ImmutableMap; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import java.util.HashMap; @@ -138,14 +137,14 @@ public class CopyOnWriteHashMapTests extends ElasticsearchTestCase { try { new CopyOnWriteHashMap<>().copyAndPut("a", null); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { // expected } try { new CopyOnWriteHashMap<>().copyAndPut(null, "b"); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } 
catch (IllegalArgumentException e) { // expected } } diff --git a/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashSetTests.java b/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashSetTests.java index 86e4611c398..f489c47bc4a 100644 --- a/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashSetTests.java +++ b/src/test/java/org/elasticsearch/common/collect/CopyOnWriteHashSetTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.common.collect; import com.google.common.collect.ImmutableSet; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import java.util.HashSet; @@ -120,7 +119,7 @@ public class CopyOnWriteHashSetTests extends ElasticsearchTestCase { try { new CopyOnWriteHashSet<>().copyAndAdd(null); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { // expected } } diff --git a/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java b/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java index 2c767d645bd..9a06b10d817 100644 --- a/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java +++ b/src/test/java/org/elasticsearch/common/geo/GeoJSONShapeParserTests.java @@ -27,7 +27,6 @@ import com.spatial4j.core.shape.ShapeCollection; import com.spatial4j.core.shape.jts.JtsGeometry; import com.spatial4j.core.shape.jts.JtsPoint; import com.vividsolutions.jts.geom.*; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.geo.builders.ShapeBuilder; import org.elasticsearch.common.xcontent.XContentFactory; @@ -35,7 +34,6 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.test.ElasticsearchTestCase; import org.elasticsearch.test.hamcrest.ElasticsearchGeoAssertions; -import org.junit.Test; import java.io.IOException; import java.util.ArrayList; @@ -539,7 +537,7 @@ public class GeoJSONShapeParserTests extends ElasticsearchTestCase { parser = JsonXContent.jsonXContent.createParser(invalidPoly); parser.nextToken(); - ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchIllegalArgumentException.class); + ElasticsearchGeoAssertions.assertValidException(parser, IllegalArgumentException.class); // test case 5: create an invalid polygon with 1 invalid LinearRing invalidPoly = XContentFactory.jsonBuilder().startObject().field("type", "polygon") @@ -550,7 +548,7 @@ public class GeoJSONShapeParserTests extends ElasticsearchTestCase { parser = JsonXContent.jsonXContent.createParser(invalidPoly); parser.nextToken(); - ElasticsearchGeoAssertions.assertValidException(parser, ElasticsearchIllegalArgumentException.class); + ElasticsearchGeoAssertions.assertValidException(parser, IllegalArgumentException.class); // test case 6: create an invalid polygon with 0 LinearRings invalidPoly = XContentFactory.jsonBuilder().startObject().field("type", "polygon") diff --git a/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java b/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java index 9ee4c272580..54e2c735ed0 100644 --- a/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java +++ b/src/test/java/org/elasticsearch/common/io/streams/BytesStreamsTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.common.io.streams; import org.apache.lucene.util.Constants; import 
org.elasticsearch.common.io.stream.BytesStreamInput; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.lucene.BytesRefs; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Ignore; @@ -263,8 +264,8 @@ public class BytesStreamsTests extends ElasticsearchTestCase { assumeTrue("requires a 64-bit JRE ... ?!", Constants.JRE_IS_64BIT); BytesStreamOutput out = new BytesStreamOutput(); out.writeBoolean(false); - out.writeByte((byte)1); - out.writeShort((short)-1); + out.writeByte((byte) 1); + out.writeShort((short) -1); out.writeInt(-1); out.writeVInt(2); out.writeLong(-3); @@ -281,6 +282,7 @@ public class BytesStreamsTests extends ElasticsearchTestCase { out.writeGenericValue(doubleArray); out.writeString("hello"); out.writeString("goodbye"); + out.writeGenericValue(BytesRefs.toBytesRef("bytesref")); BytesStreamInput in = new BytesStreamInput(out.bytes().toBytes()); assertThat(in.readBoolean(), equalTo(false)); assertThat(in.readByte(), equalTo((byte)1)); @@ -291,12 +293,13 @@ public class BytesStreamsTests extends ElasticsearchTestCase { assertThat(in.readVLong(), equalTo((long)4)); assertThat((double)in.readFloat(), closeTo(1.1, 0.0001)); assertThat(in.readDouble(), closeTo(2.2, 0.0001)); - assertThat(in.readGenericValue(), equalTo((Object)intArray)); + assertThat(in.readGenericValue(), equalTo((Object) intArray)); assertThat(in.readGenericValue(), equalTo((Object)longArray)); assertThat(in.readGenericValue(), equalTo((Object)floatArray)); assertThat(in.readGenericValue(), equalTo((Object)doubleArray)); assertThat(in.readString(), equalTo("hello")); assertThat(in.readString(), equalTo("goodbye")); + assertThat(in.readGenericValue(), equalTo((Object)BytesRefs.toBytesRef("bytesref"))); in.close(); out.close(); } diff --git a/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java b/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java new file mode 100644 index 00000000000..5192e6f9e28 --- /dev/null +++ b/src/test/java/org/elasticsearch/common/lucene/IndexCacheableQueryTests.java @@ -0,0 +1,140 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.lucene; + +import org.apache.lucene.document.Document; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.MultiReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.Explanation; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.LRUQueryCache; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryCachingPolicy; +import org.apache.lucene.search.QueryUtils; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.Bits; +import org.elasticsearch.test.ElasticsearchTestCase; + +import java.io.IOException; +import java.util.Set; + +public class IndexCacheableQueryTests extends ElasticsearchTestCase { + + static class DummyIndexCacheableQuery extends IndexCacheableQuery { + @Override + public String toString(String field) { + return "DummyIndexCacheableQuery"; + } + + @Override + public Weight doCreateWeight(IndexSearcher searcher, boolean needsScores) throws IOException { + return new Weight(this) { + + @Override + public void extractTerms(Set<Term> terms) { + throw new UnsupportedOperationException(); + } + + @Override + public Explanation explain(LeafReaderContext context, int doc) throws IOException { + throw new UnsupportedOperationException(); + } + + @Override + public float getValueForNormalization() throws IOException { + return 0; + } + + @Override + public void normalize(float norm, float topLevelBoost) { + } + + @Override + public Scorer scorer(LeafReaderContext context, Bits acceptDocs) throws IOException { + return null; + } + + }; + } + } + + public void testBasics() throws IOException { + DummyIndexCacheableQuery query = new DummyIndexCacheableQuery(); + QueryUtils.check(query); + + Query rewritten = query.rewrite(new MultiReader(new IndexReader[0])); + QueryUtils.check(rewritten); + QueryUtils.checkUnequal(query, rewritten); + + Query rewritten2 = query.rewrite(new MultiReader(new IndexReader[0])); + QueryUtils.check(rewritten2); + QueryUtils.checkUnequal(rewritten, rewritten2); + } + + public void testCache() throws IOException { + Directory dir = newDirectory(); + LRUQueryCache cache = new LRUQueryCache(10000, Long.MAX_VALUE); + QueryCachingPolicy policy = QueryCachingPolicy.ALWAYS_CACHE; + RandomIndexWriter writer = new RandomIndexWriter(getRandom(), dir); + for (int i = 0; i < 10; ++i) { + writer.addDocument(new Document()); + } + + IndexReader reader = writer.getReader(); + IndexSearcher searcher = newSearcher(reader); + searcher.setQueryCache(cache); + searcher.setQueryCachingPolicy(policy); + + assertEquals(0, cache.getCacheSize());
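+ // Running the query once should populate the cache with one entry per leaf of the reader; a second run must not add more.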
+ DummyIndexCacheableQuery query = new DummyIndexCacheableQuery(); + searcher.count(query); + int expectedCacheSize = reader.leaves().size(); + assertEquals(expectedCacheSize, cache.getCacheSize()); + searcher.count(query); + assertEquals(expectedCacheSize, cache.getCacheSize()); + + writer.addDocument(new Document()); + + DirectoryReader reader2 = writer.getReader(); + searcher = newSearcher(reader2); + searcher.setQueryCache(cache); + searcher.setQueryCachingPolicy(policy); + + // since the query is only cacheable at the index level, it has to be recomputed on all leaves + expectedCacheSize += reader2.leaves().size(); + searcher.count(query); + assertEquals(expectedCacheSize, cache.getCacheSize()); + searcher.count(query); + assertEquals(expectedCacheSize, cache.getCacheSize()); + + reader.close(); + reader2.close(); + writer.close(); + assertEquals(0, cache.getCacheSize()); + dir.close(); + } + +} diff --git a/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java b/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java index e74f22cb6d8..816409675af 100644 --- a/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java +++ b/src/test/java/org/elasticsearch/common/lucene/LuceneTest.java @@ -32,6 +32,8 @@ import org.junit.Test; import java.io.IOException; import java.util.HashSet; import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicBoolean; /** * @@ -48,6 +50,55 @@ public class LuceneTest extends ElasticsearchTestCase { assertEquals(Lucene.VERSION, Version.LATEST); } + public void testWaitForIndex() throws Exception { + final MockDirectoryWrapper dir = newMockDirectory(); + + final AtomicBoolean succeeded = new AtomicBoolean(false); + final CountDownLatch latch = new CountDownLatch(1); + + // Create a shadow Engine, which will freak out because there is no + // index yet + Thread t = new Thread(new Runnable() { + @Override + public void run() { + try { + latch.await(); + if (Lucene.waitForIndex(dir, 5000)) { + succeeded.set(true); + } else { + fail("index should have eventually existed!"); + } + } catch (InterruptedException e) { + // ignore interruptions + } catch (Exception e) { + fail("should have been able to create the engine! " + e.getMessage()); + } + } + }); + t.start(); + + // count down latch + // now shadow engine should try to be created + latch.countDown(); + + dir.setEnableVirusScanner(false); + IndexWriterConfig iwc = newIndexWriterConfig(); + iwc.setIndexDeletionPolicy(NoDeletionPolicy.INSTANCE); + iwc.setMergePolicy(NoMergePolicy.INSTANCE); + iwc.setMaxBufferedDocs(2); + IndexWriter writer = new IndexWriter(dir, iwc); + Document doc = new Document(); + doc.add(new TextField("id", "1", random().nextBoolean() ? 
Field.Store.YES : Field.Store.NO)); + writer.addDocument(doc); + writer.commit(); + + t.join(); + + writer.close(); + dir.close(); + assertTrue("index should have eventually existed", succeeded.get()); + } + public void testCleanIndex() throws IOException { MockDirectoryWrapper dir = newMockDirectory(); dir.setEnableVirusScanner(false); @@ -93,8 +144,11 @@ public class LuceneTest extends ElasticsearchTestCase { } Lucene.cleanLuceneIndex(dir); if (dir.listAll().length > 0) { - assertEquals(dir.listAll().length, 1); - assertEquals(dir.listAll()[0], "write.lock"); + for (String file : dir.listAll()) { + if (file.startsWith("extra") == false) { + assertEquals(file, "write.lock"); + } + } } dir.close(); } @@ -149,7 +203,7 @@ public class LuceneTest extends ElasticsearchTestCase { assertEquals(s.search(new TermQuery(new Term("id", "4")), 1).totalHits, 0); for (String file : dir.listAll()) { - assertFalse("unexpected file: " + file, file.equals("segments_3") || file.startsWith("_2")); + assertFalse("unexpected file: " + file, file.equals("segments_3") || file.startsWith("_2") || file.startsWith("extra")); } open.close(); dir.close(); diff --git a/src/test/java/org/elasticsearch/common/lucene/ShardCoreKeyMapTests.java b/src/test/java/org/elasticsearch/common/lucene/ShardCoreKeyMapTests.java new file mode 100644 index 00000000000..f6873d0c075 --- /dev/null +++ b/src/test/java/org/elasticsearch/common/lucene/ShardCoreKeyMapTests.java @@ -0,0 +1,137 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.common.lucene; + +import org.apache.lucene.document.Document; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.store.Directory; +import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.ElasticsearchTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +public class ShardCoreKeyMapTests extends ElasticsearchTestCase { + + public void testMissingShard() throws IOException { + try (Directory dir = newDirectory(); + RandomIndexWriter w = new RandomIndexWriter(random(), dir)) { + w.addDocument(new Document()); + try (IndexReader reader = w.getReader()) { + ShardCoreKeyMap map = new ShardCoreKeyMap(); + for (LeafReaderContext ctx : reader.leaves()) { + try { + map.add(ctx.reader()); + fail(); + } catch (IllegalArgumentException expected) { + // ok + } + } + } + } + } + + public void testBasics() throws IOException { + Directory dir1 = newDirectory(); + RandomIndexWriter w1 = new RandomIndexWriter(random(), dir1); + w1.addDocument(new Document()); + + Directory dir2 = newDirectory(); + RandomIndexWriter w2 = new RandomIndexWriter(random(), dir2); + w2.addDocument(new Document()); + + Directory dir3 = newDirectory(); + RandomIndexWriter w3 = new RandomIndexWriter(random(), dir3); + w3.addDocument(new Document()); + + ShardId shardId1 = new ShardId("index1", 1); + ShardId shardId2 = new ShardId("index1", 3); + ShardId shardId3 = new ShardId("index2", 2); + + ElasticsearchDirectoryReader reader1 = ElasticsearchDirectoryReader.wrap(w1.getReader(), shardId1); + ElasticsearchDirectoryReader reader2 = ElasticsearchDirectoryReader.wrap(w2.getReader(), shardId2); + ElasticsearchDirectoryReader reader3 = ElasticsearchDirectoryReader.wrap(w3.getReader(), shardId3); + + ShardCoreKeyMap map = new ShardCoreKeyMap(); + for (DirectoryReader reader : Arrays.asList(reader1, reader2, reader3)) { + for (LeafReaderContext ctx : reader.leaves()) { + map.add(ctx.reader()); + } + } + assertEquals(3, map.size()); + + // Adding them back is a no-op + for (LeafReaderContext ctx : reader1.leaves()) { + map.add(ctx.reader()); + } + assertEquals(3, map.size()); + + for (LeafReaderContext ctx : reader2.leaves()) { + assertEquals(shardId2, map.getShardId(ctx.reader().getCoreCacheKey())); + } + + w1.addDocument(new Document()); + ElasticsearchDirectoryReader newReader1 = ElasticsearchDirectoryReader.wrap(w1.getReader(), shardId1); + reader1.close(); + reader1 = newReader1; + + // same for reader2, but with a force merge to trigger evictions + w2.addDocument(new Document()); + w2.forceMerge(1); + ElasticsearchDirectoryReader newReader2 = ElasticsearchDirectoryReader.wrap(w2.getReader(), shardId2); + reader2.close(); + reader2 = newReader2; + + for (DirectoryReader reader : Arrays.asList(reader1, reader2, reader3)) { + for (LeafReaderContext ctx : reader.leaves()) { + map.add(ctx.reader()); + } + } + + final Set<Object> index1Keys = new HashSet<>(); + for (DirectoryReader reader : Arrays.asList(reader1, reader2)) { + for (LeafReaderContext ctx : reader.leaves()) { + index1Keys.add(ctx.reader().getCoreCacheKey()); + } + } + index1Keys.removeAll(map.getCoreKeysForIndex("index1")); + assertEquals(Collections.emptySet(), index1Keys);
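+ // Once every reader and writer is closed, the close listeners should have evicted all core keys from the map.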
+ reader1.close(); + w1.close(); + reader2.close(); + w2.close(); + reader3.close(); + w3.close(); + assertEquals(0, map.size()); + + dir1.close(); + dir2.close(); + dir3.close(); + } + +} diff --git a/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java b/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java index 38054992298..5aabe4a594c 100644 --- a/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java +++ b/src/test/java/org/elasticsearch/common/lucene/index/FreqTermsEnumTests.java @@ -37,6 +37,7 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.Term; import org.apache.lucene.queries.TermsQuery; import org.apache.lucene.search.Filter; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.store.Directory; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; @@ -158,7 +159,7 @@ public class FreqTermsEnumTests extends ElasticsearchTestCase { } } } - filter = Queries.wrap(new TermsQuery(filterTerms)); + filter = new QueryWrapperFilter(new TermsQuery(filterTerms)); } private void addFreqs(Document doc, Map<String, FreqHolder> reference) { diff --git a/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java b/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java index 524ae7367a6..60d161b4121 100644 --- a/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java +++ b/src/test/java/org/elasticsearch/common/recycler/AbstractRecyclerTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.recycler; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.recycler.Recycler.V; import org.elasticsearch.test.ElasticsearchTestCase; @@ -121,7 +120,7 @@ public abstract class AbstractRecyclerTests extends ElasticsearchTestCase { v1.close(); try { v1.close(); - } catch (ElasticsearchIllegalStateException e) { + } catch (IllegalStateException e) { // impl has protection against double release: ok return; } diff --git a/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java b/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java index e52b9f8ff81..5e0ab4f3b08 100644 --- a/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java +++ b/src/test/java/org/elasticsearch/common/unit/SizeValueTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.common.unit; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; @@ -63,7 +62,7 @@ public class SizeValueTests extends ElasticsearchTestCase { assertThat(SizeValue.parseSizeValue("1G").toString(), is(new SizeValue(1, SizeUnit.GIGA).toString())); } - @Test(expected = ElasticsearchIllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void testThatNegativeValuesThrowException() { new SizeValue(-1); } diff --git a/src/test/java/org/elasticsearch/common/util/BigArraysTests.java b/src/test/java/org/elasticsearch/common/util/BigArraysTests.java index 2013edb53a4..2e9497e5fd9 100644 --- a/src/test/java/org/elasticsearch/common/util/BigArraysTests.java +++ b/src/test/java/org/elasticsearch/common/util/BigArraysTests.java @@ -39,7 +39,7 @@ public class BigArraysTests extends ElasticsearchSingleNodeTest { public static BigArrays randombigArrays() { final PageCacheRecycler recycler = randomBoolean() ? 
null : ElasticsearchSingleNodeTest.getInstanceFromNode(PageCacheRecycler.class); - return new MockBigArrays(ImmutableSettings.EMPTY, recycler, new NoneCircuitBreakerService()); + return new MockBigArrays(recycler, new NoneCircuitBreakerService()); } private BigArrays bigArrays; @@ -339,7 +339,7 @@ public class BigArraysTests extends ElasticsearchSingleNodeTest { .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, size - 1) .build(), new NodeSettingsService(ImmutableSettings.EMPTY)); - BigArrays bigArrays = new BigArrays(ImmutableSettings.EMPTY, null, hcbs).withCircuitBreaking(); + BigArrays bigArrays = new BigArrays(null, hcbs).withCircuitBreaking(); Method create = BigArrays.class.getMethod("new" + type + "Array", long.class); try { create.invoke(bigArrays, size); @@ -359,7 +359,7 @@ public class BigArraysTests extends ElasticsearchSingleNodeTest { .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING, maxSize) .build(), new NodeSettingsService(ImmutableSettings.EMPTY)); - BigArrays bigArrays = new BigArrays(ImmutableSettings.EMPTY, null, hcbs).withCircuitBreaking(); + BigArrays bigArrays = new BigArrays(null, hcbs).withCircuitBreaking(); Method create = BigArrays.class.getMethod("new" + type + "Array", long.class); final int size = scaledRandomIntBetween(1, 20); BigArray array = (BigArray) create.invoke(bigArrays, size); diff --git a/src/test/java/org/elasticsearch/count/query/CountQueryTests.java b/src/test/java/org/elasticsearch/count/query/CountQueryTests.java index 0f77e83c03a..d06c2613e19 100644 --- a/src/test/java/org/elasticsearch/count/query/CountQueryTests.java +++ b/src/test/java/org/elasticsearch/count/query/CountQueryTests.java @@ -373,26 +373,6 @@ public class CountQueryTests extends ElasticsearchIntegrationTest { assertHitCount(client().prepareCount().setQuery(bool).get(), 1l); } - @Test - public void testFiltersWithCustomCacheKey() throws Exception { - createIndex("test"); - ensureGreen(); - client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get(); - refresh(); - - CountResponse countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get(); - assertHitCount(countResponse, 1l); - - countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get(); - assertHitCount(countResponse, 1l); - - countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get(); - assertHitCount(countResponse, 1l); - - countResponse = client().prepareCount("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get(); - assertHitCount(countResponse, 1l); - } - @Test public void testMatchQueryNumeric() throws Exception { createIndex("test"); @@ -818,7 +798,7 @@ public class CountQueryTests extends ElasticsearchIntegrationTest { } @Test // see #2994 - public void testSimpleSpan() throws ElasticsearchException, IOException { + public void testSimpleSpan() throws IOException { createIndex("test"); ensureGreen(); diff --git a/src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java b/src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java deleted file mode 100644 index ffc8db1d36b..00000000000 --- a/src/test/java/org/elasticsearch/deleteByQuery/DeleteByQueryTests.java +++ /dev/null @@ -1,204 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.deleteByQuery; - -import org.apache.lucene.util.LuceneTestCase.Slow; -import org.elasticsearch.action.ActionWriteResponse; -import org.elasticsearch.action.admin.indices.alias.Alias; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder; -import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; -import org.elasticsearch.action.deletebyquery.IndexDeleteByQueryResponse; -import org.elasticsearch.action.index.IndexRequestBuilder; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.IndicesOptions; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.indices.IndexMissingException; -import org.elasticsearch.rest.RestStatus; -import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.junit.Test; - -import java.util.concurrent.ExecutionException; - -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; -import static org.hamcrest.Matchers.*; - -@Slow -public class DeleteByQueryTests extends ElasticsearchIntegrationTest { - - @Test - public void testDeleteAllNoIndices() { - client().admin().indices().prepareRefresh().execute().actionGet(); - DeleteByQueryRequestBuilder deleteByQueryRequestBuilder = client().prepareDeleteByQuery(); - deleteByQueryRequestBuilder.setQuery(QueryBuilders.matchAllQuery()); - deleteByQueryRequestBuilder.setIndicesOptions(IndicesOptions.fromOptions(false, true, true, false)); - DeleteByQueryResponse actionGet = deleteByQueryRequestBuilder.execute().actionGet(); - assertThat(actionGet.getIndices().size(), equalTo(0)); - } - - @Test - public void testDeleteAllOneIndex() { - String json = "{" + "\"user\":\"kimchy\"," + "\"postDate\":\"2013-01-30\"," + "\"message\":\"trying out Elastic Search\"" + "}"; - final long iters = randomIntBetween(1, 50); - for (int i = 0; i < iters; i++) { - client().prepareIndex("twitter", "tweet", "" + i).setSource(json).execute().actionGet(); - } - refresh(); - SearchResponse search = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(); - assertThat(search.getHits().totalHits(), equalTo(iters)); - DeleteByQueryRequestBuilder deleteByQueryRequestBuilder = client().prepareDeleteByQuery(); - deleteByQueryRequestBuilder.setQuery(QueryBuilders.matchAllQuery()); - - DeleteByQueryResponse response = deleteByQueryRequestBuilder.execute().actionGet(); - assertThat(response.status(), equalTo(RestStatus.OK)); - assertSyncShardInfo(response.getIndex("twitter").getShardInfo(), getNumShards("twitter")); - - client().admin().indices().prepareRefresh().execute().actionGet(); - search = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(); - assertThat(search.getHits().totalHits(), equalTo(0l)); - } - - @Test - public void 
testMissing() { - - String json = "{" + "\"user\":\"kimchy\"," + "\"postDate\":\"2013-01-30\"," + "\"message\":\"trying out Elastic Search\"" + "}"; - - client().prepareIndex("twitter", "tweet").setSource(json).setRefresh(true).execute().actionGet(); - - SearchResponse search = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(); - assertThat(search.getHits().totalHits(), equalTo(1l)); - DeleteByQueryRequestBuilder deleteByQueryRequestBuilder = client().prepareDeleteByQuery(); - deleteByQueryRequestBuilder.setIndices("twitter", "missing"); - deleteByQueryRequestBuilder.setQuery(QueryBuilders.matchAllQuery()); - - try { - deleteByQueryRequestBuilder.execute().actionGet(); - fail("Exception should have been thrown."); - } catch (IndexMissingException e) { - //everything well - } - - deleteByQueryRequestBuilder.setIndicesOptions(IndicesOptions.lenientExpandOpen()); - DeleteByQueryResponse response = deleteByQueryRequestBuilder.execute().actionGet(); - assertThat(response.status(), equalTo(RestStatus.OK)); - assertSyncShardInfo(response.getIndex("twitter").getShardInfo(), getNumShards("twitter")); - - client().admin().indices().prepareRefresh().execute().actionGet(); - search = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(); - assertThat(search.getHits().totalHits(), equalTo(0l)); - } - - @Test - public void testFailure() throws Exception { - assertAcked(prepareCreate("test").addAlias(new Alias("alias"))); - - DeleteByQueryResponse response = client().prepareDeleteByQuery(indexOrAlias()) - .setQuery(QueryBuilders.hasChildQuery("type", QueryBuilders.matchAllQuery())) - .execute().actionGet(); - - NumShards twitter = getNumShards("test"); - - assertThat(response.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(response.getIndex("test").getShardInfo().getSuccessful(), equalTo(0)); - assertThat(response.getIndex("test").getShardInfo().getFailures().length, equalTo(twitter.numPrimaries)); - assertThat(response.getIndices().size(), equalTo(1)); - assertThat(response.getIndices().get("test").getShardInfo().getFailures().length, equalTo(twitter.numPrimaries)); - for (ActionWriteResponse.ShardInfo.Failure failure : response.getIndices().get("test").getShardInfo().getFailures()) { - assertThat(failure.reason(), containsString("[test] [has_child] query and filter unsupported in delete_by_query api")); - assertThat(failure.status(), equalTo(RestStatus.BAD_REQUEST)); - assertThat(failure.shardId(), greaterThan(-1)); - } - } - - @Test - public void testDeleteByFieldQuery() throws Exception { - assertAcked(prepareCreate("test").addAlias(new Alias("alias"))); - int numDocs = scaledRandomIntBetween(10, 100); - for (int i = 0; i < numDocs; i++) { - client().prepareIndex("test", "test", Integer.toString(i)) - .setRouting(randomAsciiOfLengthBetween(1, 5)) - .setSource("foo", "bar").get(); - } - refresh(); - assertHitCount(client().prepareCount("test").setQuery(QueryBuilders.matchQuery("_id", Integer.toString(between(0, numDocs - 1)))).get(), 1); - assertHitCount(client().prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).get(), numDocs); - DeleteByQueryResponse deleteByQueryResponse = client().prepareDeleteByQuery(indexOrAlias()) - .setQuery(QueryBuilders.matchQuery("_id", Integer.toString(between(0, numDocs - 1)))).get(); - assertThat(deleteByQueryResponse.getIndices().size(), equalTo(1)); - assertThat(deleteByQueryResponse.getIndex("test"), notNullValue()); - - refresh(); - 
assertHitCount(client().prepareCount("test").setQuery(QueryBuilders.matchAllQuery()).get(), numDocs - 1); - } - - @Test - public void testDateMath() throws Exception { - index("test", "type", "1", "d", "2013-01-01"); - ensureGreen(); - refresh(); - assertHitCount(client().prepareCount("test").get(), 1); - client().prepareDeleteByQuery("test").setQuery(QueryBuilders.rangeQuery("d").to("now-1h")).get(); - refresh(); - assertHitCount(client().prepareCount("test").get(), 0); - } - - @Test - public void testDeleteByTermQuery() throws ExecutionException, InterruptedException { - createIndex("test"); - ensureGreen(); - - int numDocs = iterations(10, 50); - IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs + 1]; - for (int i = 0; i < numDocs; i++) { - indexRequestBuilders[i] = client().prepareIndex("test", "test", Integer.toString(i)).setSource("field", "value"); - } - indexRequestBuilders[numDocs] = client().prepareIndex("test", "test", Integer.toString(numDocs)).setSource("field", "other_value"); - indexRandom(true, indexRequestBuilders); - - SearchResponse searchResponse = client().prepareSearch("test").get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().totalHits(), equalTo((long)numDocs + 1)); - - DeleteByQueryResponse deleteByQueryResponse = client().prepareDeleteByQuery("test").setQuery(QueryBuilders.termQuery("field", "value")).get(); - assertThat(deleteByQueryResponse.getIndices().size(), equalTo(1)); - for (IndexDeleteByQueryResponse indexDeleteByQueryResponse : deleteByQueryResponse) { - assertThat(indexDeleteByQueryResponse.getIndex(), equalTo("test")); - assertThat(indexDeleteByQueryResponse.getShardInfo().getFailures().length, equalTo(0)); - } - - refresh(); - searchResponse = client().prepareSearch("test").get(); - assertNoFailures(searchResponse); - assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); - } - - private static String indexOrAlias() { - return randomBoolean() ? "test" : "alias"; - } - - private void assertSyncShardInfo(ActionWriteResponse.ShardInfo shardInfo, NumShards numShards) { - assertThat(shardInfo.getTotal(), greaterThanOrEqualTo(numShards.totalNumShards)); - // we do not ensure green so just make sure request succeeded at least on all primaries - assertThat(shardInfo.getSuccessful(), greaterThanOrEqualTo(numShards.numPrimaries)); - assertThat(shardInfo.getFailed(), equalTo(0)); - for (ActionWriteResponse.ShardInfo.Failure failure : shardInfo.getFailures()) { - assertThat(failure.status(), equalTo(RestStatus.OK)); - } - } -} diff --git a/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java b/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java index 430690ae146..f1e7a249c59 100644 --- a/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java +++ b/src/test/java/org/elasticsearch/discovery/ZenUnicastDiscoveryTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; import org.elasticsearch.test.discovery.ClusterDiscoveryConfiguration; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.Before; import org.junit.Test; @@ -81,8 +82,9 @@ public class ZenUnicastDiscoveryTests extends ElasticsearchIntegrationTest { // can't be satisfied. 
public void testMinimumMasterNodes() throws Exception { int currentNumNodes = randomIntBetween(3, 5); - int currentNumOfUnicastHosts = randomIntBetween(1, currentNumNodes); - final Settings settings = ImmutableSettings.settingsBuilder().put("discovery.zen.minimum_master_nodes", currentNumNodes / 2 + 1).build(); + final int min_master_nodes = currentNumNodes / 2 + 1; + int currentNumOfUnicastHosts = randomIntBetween(min_master_nodes, currentNumNodes); + final Settings settings = ImmutableSettings.settingsBuilder().put("discovery.zen.minimum_master_nodes", min_master_nodes).build(); discoveryConfig = new ClusterDiscoveryConfiguration.UnicastZen(currentNumNodes, currentNumOfUnicastHosts, settings); List nodes = internalCluster().startNodesAsync(currentNumNodes).get(); diff --git a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java index 58e177b1115..228faa8cf4d 100644 --- a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java +++ b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryTests.java @@ -32,9 +32,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.Priority; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.compress.CompressorFactory; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.LocalTransportAddress; @@ -196,12 +193,7 @@ public class ZenDiscoveryTests extends ElasticsearchIntegrationTest { .put(new DiscoveryNode("abc", new LocalTransportAddress("abc"), Version.CURRENT)).masterNodeId("abc"); ClusterState.Builder builder = ClusterState.builder(state); builder.nodes(nodes); - BytesStreamOutput bStream = new BytesStreamOutput(); - StreamOutput stream = CompressorFactory.defaultCompressor().streamOutput(bStream); - stream.setVersion(node.version()); - ClusterState.Builder.writeTo(builder.build(), stream); - stream.close(); - BytesReference bytes = bStream.bytes(); + BytesReference bytes = PublishClusterStateAction.serializeFullClusterState(builder.build(), node.version()); final CountDownLatch latch = new CountDownLatch(1); final AtomicReference reference = new AtomicReference<>(); diff --git a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java index 4d7811fa023..169dbdbe4a2 100644 --- a/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java +++ b/src/test/java/org/elasticsearch/discovery/zen/ZenDiscoveryUnitTest.java @@ -19,7 +19,6 @@ package org.elasticsearch.discovery.zen; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -75,7 +74,7 @@ public class ZenDiscoveryUnitTest extends ElasticsearchTestCase { try { shouldIgnoreOrRejectNewClusterState(logger, currentState.build(), newState.build()); fail("should ignore, because current state's master is not equal to new state's master"); - } catch (ElasticsearchIllegalStateException e) { + } catch (IllegalStateException e) { assertThat(e.getMessage(), containsString("cluster state from a different master then the 
current one, rejecting")); } @@ -105,7 +104,7 @@ public class ZenDiscoveryUnitTest extends ElasticsearchTestCase { int numUpdates = scaledRandomIntBetween(50, 100); LinkedList queue = new LinkedList<>(); for (int i = 0; i < numUpdates; i++) { - queue.add(new ProcessClusterState(ClusterState.builder(clusterName).version(i).nodes(nodes).build(), null)); + queue.add(new ProcessClusterState(ClusterState.builder(clusterName).version(i).nodes(nodes).build())); } ProcessClusterState mostRecent = queue.get(numUpdates - 1); Collections.shuffle(queue, getRandom()); @@ -121,15 +120,15 @@ public class ZenDiscoveryUnitTest extends ElasticsearchTestCase { DiscoveryNodes nodes2 = DiscoveryNodes.builder().masterNodeId("b").build(); LinkedList queue = new LinkedList<>(); - ProcessClusterState thirdMostRecent = new ProcessClusterState(ClusterState.builder(clusterName).version(1).nodes(nodes1).build(), null); + ProcessClusterState thirdMostRecent = new ProcessClusterState(ClusterState.builder(clusterName).version(1).nodes(nodes1).build()); queue.offer(thirdMostRecent); - ProcessClusterState secondMostRecent = new ProcessClusterState(ClusterState.builder(clusterName).version(2).nodes(nodes1).build(), null); + ProcessClusterState secondMostRecent = new ProcessClusterState(ClusterState.builder(clusterName).version(2).nodes(nodes1).build()); queue.offer(secondMostRecent); - ProcessClusterState mostRecent = new ProcessClusterState(ClusterState.builder(clusterName).version(3).nodes(nodes1).build(), null); + ProcessClusterState mostRecent = new ProcessClusterState(ClusterState.builder(clusterName).version(3).nodes(nodes1).build()); queue.offer(mostRecent); Collections.shuffle(queue, getRandom()); - queue.offer(new ProcessClusterState(ClusterState.builder(clusterName).version(4).nodes(nodes2).build(), null)); - queue.offer(new ProcessClusterState(ClusterState.builder(clusterName).version(5).nodes(nodes1).build(), null)); + queue.offer(new ProcessClusterState(ClusterState.builder(clusterName).version(4).nodes(nodes2).build())); + queue.offer(new ProcessClusterState(ClusterState.builder(clusterName).version(5).nodes(nodes1).build())); assertThat(ZenDiscovery.selectNextStateToProcess(queue), sameInstance(mostRecent.clusterState)); diff --git a/src/test/java/org/elasticsearch/document/BulkTests.java b/src/test/java/org/elasticsearch/document/BulkTests.java index f49914606dd..380828372bb 100644 --- a/src/test/java/org/elasticsearch/document/BulkTests.java +++ b/src/test/java/org/elasticsearch/document/BulkTests.java @@ -133,7 +133,7 @@ public class BulkTests extends ElasticsearchIntegrationTest { assertThat(bulkResponse.getItems()[1].getResponse(), nullValue()); assertThat(bulkResponse.getItems()[1].getFailure().getIndex(), equalTo("test")); assertThat(bulkResponse.getItems()[1].getFailure().getId(), equalTo("7")); - assertThat(bulkResponse.getItems()[1].getFailure().getMessage(), containsString("DocumentMissingException")); + assertThat(bulkResponse.getItems()[1].getFailure().getMessage(), containsString("document missing")); assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getId(), equalTo("2")); assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getIndex(), equalTo("test")); assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(3l)); @@ -173,7 +173,7 @@ public class BulkTests extends ElasticsearchIntegrationTest { .add(client().prepareUpdate("test", "type", "2").setDoc("field", "2")) .add(client().prepareUpdate("test", "type", 
"1").setVersion(2l).setDoc("field", "3")).get(); - assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("Version")); + assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("version conflict")); assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(2l)); assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(3l)); @@ -194,7 +194,7 @@ public class BulkTests extends ElasticsearchIntegrationTest { .add(client().prepareUpdate("test", "type", "e1").setDoc("field", "3").setVersion(20).setVersionType(VersionType.FORCE)) .add(client().prepareUpdate("test", "type", "e1").setDoc("field", "3").setVersion(20).setVersionType(VersionType.INTERNAL)).get(); - assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("Version")); + assertThat(bulkResponse.getItems()[0].getFailureMessage(), containsString("version conflict")); assertThat(((UpdateResponse) bulkResponse.getItems()[1].getResponse()).getVersion(), equalTo(20l)); assertThat(((UpdateResponse) bulkResponse.getItems()[2].getResponse()).getVersion(), equalTo(21l)); } @@ -325,7 +325,7 @@ public class BulkTests extends ElasticsearchIntegrationTest { int id = i + (numDocs / 2); if (i >= (numDocs / 2)) { assertThat(response.getItems()[i].getFailure().getId(), equalTo(Integer.toString(id))); - assertThat(response.getItems()[i].getFailure().getMessage(), containsString("DocumentMissingException")); + assertThat(response.getItems()[i].getFailure().getMessage(), containsString("document missing")); } else { assertThat(response.getItems()[i].getId(), equalTo(Integer.toString(id))); assertThat(response.getItems()[i].getVersion(), equalTo(3l)); diff --git a/src/test/java/org/elasticsearch/document/DocumentActionsTests.java b/src/test/java/org/elasticsearch/document/DocumentActionsTests.java index 06b37172d2f..4de1e72b87b 100644 --- a/src/test/java/org/elasticsearch/document/DocumentActionsTests.java +++ b/src/test/java/org/elasticsearch/document/DocumentActionsTests.java @@ -28,7 +28,6 @@ import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.count.CountResponse; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -177,23 +176,6 @@ public class DocumentActionsTests extends ElasticsearchIntegrationTest { assertThat(countResponse.getSuccessfulShards(), equalTo(numShards.numPrimaries)); assertThat(countResponse.getFailedShards(), equalTo(0)); } - - logger.info("Delete by query"); - DeleteByQueryResponse queryResponse = client().prepareDeleteByQuery().setIndices("test").setQuery(termQuery("name", "test2")).execute().actionGet(); - assertThat(queryResponse.getIndex(getConcreteIndexName()).getShardInfo().getTotal(), greaterThanOrEqualTo(numShards.totalNumShards)); - assertThat(queryResponse.getIndex(getConcreteIndexName()).getShardInfo().getSuccessful(), greaterThanOrEqualTo(numShards.totalNumShards)); - assertThat(queryResponse.getIndex(getConcreteIndexName()).getShardInfo().getFailures().length, equalTo(0)); - client().admin().indices().refresh(refreshRequest("test")).actionGet(); - - logger.info("Get [type1/1] and [type1/2], should be empty"); - for (int i = 0; i < 5; i++) { - getResult = 
client().get(getRequest("test").type("type1").id("1")).actionGet(); - assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); - assertThat("cycle #" + i, getResult.getSourceAsString(), equalTo(source("1", "test").string())); - getResult = client().get(getRequest("test").type("type1").id("2")).actionGet(); - assertThat("cycle #" + i, getResult.isExists(), equalTo(false)); - assertThat(getResult.getIndex(), equalTo(getConcreteIndexName())); - } } @Test diff --git a/src/test/java/org/elasticsearch/document/ShardInfoTests.java b/src/test/java/org/elasticsearch/document/ShardInfoTests.java index 22533eaef69..7bd950ca3c0 100644 --- a/src/test/java/org/elasticsearch/document/ShardInfoTests.java +++ b/src/test/java/org/elasticsearch/document/ShardInfoTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.action.bulk.BulkItemResponse; import org.elasticsearch.action.bulk.BulkRequestBuilder; import org.elasticsearch.action.bulk.BulkResponse; import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.deletebyquery.IndexDeleteByQueryResponse; import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.action.update.UpdateResponse; import org.elasticsearch.cluster.ClusterState; @@ -100,17 +99,6 @@ public class ShardInfoTests extends ElasticsearchIntegrationTest { } } - @Test - public void testDeleteByQuery() throws Exception { - int numPrimaryShards = randomIntBetween(1, 2); - prepareIndex(numPrimaryShards); - IndexDeleteByQueryResponse indexDeleteByQueryResponse = client().prepareDeleteByQuery("idx") - .setQuery(QueryBuilders.matchAllQuery()) - .get().getIndex("idx"); - assertShardInfo(indexDeleteByQueryResponse, numCopies * numPrimaryShards, numNodes * numPrimaryShards); - } - - private void prepareIndex(int numberOfPrimaryShards) throws Exception { prepareIndex(numberOfPrimaryShards, false); } diff --git a/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 154a043c996..f84457dab3b 100644 --- a/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -21,7 +21,6 @@ package org.elasticsearch.env; import org.apache.lucene.store.LockObtainFailedException; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.ImmutableSettings; @@ -35,7 +34,6 @@ import org.junit.Test; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; -import java.nio.file.Paths; import java.util.List; import java.util.Set; import java.util.concurrent.CountDownLatch; @@ -61,7 +59,7 @@ public class NodeEnvironmentTests extends ElasticsearchTestCase { try { new NodeEnvironment(settings, new Environment(settings)); fail("env is already locked"); - } catch (ElasticsearchIllegalStateException ex) { + } catch (IllegalStateException ex) { } env.close(); diff --git a/src/test/java/org/elasticsearch/explain/ExplainActionTests.java b/src/test/java/org/elasticsearch/explain/ExplainActionTests.java index 17a91370195..c22d4470875 100644 --- a/src/test/java/org/elasticsearch/explain/ExplainActionTests.java +++ b/src/test/java/org/elasticsearch/explain/ExplainActionTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.explain; -import 
org.apache.lucene.search.ComplexExplanation; import org.apache.lucene.search.Explanation; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.explain.ExplainResponse; @@ -275,7 +274,7 @@ public class ExplainActionTests extends ElasticsearchIntegrationTest { @Test public void streamExplainTest() throws Exception { - Explanation exp = new Explanation((float) 2.0, "some explanation"); + Explanation exp = Explanation.match(2f, "some explanation"); // write ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); @@ -289,8 +288,7 @@ public class ExplainActionTests extends ElasticsearchIntegrationTest { Explanation result = Lucene.readExplanation(esBuffer); assertThat(exp.toString(),equalTo(result.toString())); - exp = new ComplexExplanation(true, 2.0f, "some explanation"); - exp.addDetail(new Explanation(2.0f,"another explanation")); + exp = Explanation.match(2.0f, "some explanation", Explanation.match(2.0f,"another explanation")); // write complex outBuffer = new ByteArrayOutputStream(); diff --git a/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java b/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java new file mode 100644 index 00000000000..97231bec442 --- /dev/null +++ b/src/test/java/org/elasticsearch/fieldstats/FieldStatsIntegrationTests.java @@ -0,0 +1,216 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.fieldstats;
+
+import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.fieldstats.FieldStats;
+import org.elasticsearch.action.fieldstats.FieldStatsResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.nullValue;
+
+/**
+ */
+public class FieldStatsIntegrationTests extends ElasticsearchIntegrationTest {
+
+    public void testRandom() throws Exception {
+        assertAcked(prepareCreate("test").addMapping(
+                "test", "string", "type=string", "date", "type=date", "double", "type=double",
+                "float", "type=float", "long", "type=long", "integer", "type=integer", "short", "type=short", "byte", "type=byte"
+        ));
+        ensureGreen("test");
+
+        byte minByte = Byte.MAX_VALUE;
+        byte maxByte = Byte.MIN_VALUE;
+        short minShort = Short.MAX_VALUE;
+        short maxShort = Short.MIN_VALUE;
+        int minInt = Integer.MAX_VALUE;
+        int maxInt = Integer.MIN_VALUE;
+        long minLong = Long.MAX_VALUE;
+        long maxLong = Long.MIN_VALUE;
+        float minFloat = Float.MAX_VALUE;
+        float maxFloat = Float.MIN_VALUE;
+        double minDouble = Double.MAX_VALUE;
+        double maxDouble = Double.MIN_VALUE;
+        String minString = new String(Character.toChars(1114111));
+        String maxString = "0";
+
+        int numDocs = scaledRandomIntBetween(128, 1024);
+        List<IndexRequestBuilder> request = new ArrayList<>(numDocs);
+        for (int doc = 0; doc < numDocs; doc++) {
+            byte b = randomByte();
+            minByte = (byte) Math.min(minByte, b);
+            maxByte = (byte) Math.max(maxByte, b);
+            short s = randomShort();
+            minShort = (short) Math.min(minShort, s);
+            maxShort = (short) Math.max(maxShort, s);
+            int i = randomInt();
+            minInt = Math.min(minInt, i);
+            maxInt = Math.max(maxInt, i);
+            long l = randomLong();
+            minLong = Math.min(minLong, l);
+            maxLong = Math.max(maxLong, l);
+            float f = randomFloat();
+            minFloat = Math.min(minFloat, f);
+            maxFloat = Math.max(maxFloat, f);
+            double d = randomDouble();
+            minDouble = Math.min(minDouble, d);
+            maxDouble = Math.max(maxDouble, d);
+            String str = randomRealisticUnicodeOfLength(3);
+            if (str.compareTo(minString) < 0) {
+                minString = str;
+            }
+            if (str.compareTo(maxString) > 0) {
+                maxString = str;
+            }
+
+            request.add(client().prepareIndex("test", "test", Integer.toString(doc))
+                    .setSource("byte", b, "short", s, "integer", i, "long", l, "float", f, "double", d, "string", str)
+            );
+        }
+        indexRandom(true, false, request);
+
+        FieldStatsResponse response = client().prepareFieldStats().setFields("byte", "short", "integer", "long", "float", "double", "string").get();
+        assertAllSuccessful(response);
+
+        for (FieldStats stats : response.getAllFieldStats().values()) {
+            assertThat(stats.getMaxDoc(), equalTo((long) numDocs));
+            assertThat(stats.getDocCount(), equalTo((long) numDocs));
+            assertThat(stats.getDensity(), equalTo(100));
+        }
+
+        assertThat(response.getAllFieldStats().get("byte").getMinValue(), equalTo(Byte.toString(minByte)));
+        assertThat(response.getAllFieldStats().get("byte").getMaxValue(), equalTo(Byte.toString(maxByte)));
+
assertThat(response.getAllFieldStats().get("short").getMinValue(), equalTo(Short.toString(minShort))); + assertThat(response.getAllFieldStats().get("short").getMaxValue(), equalTo(Short.toString(maxShort))); + assertThat(response.getAllFieldStats().get("integer").getMinValue(), equalTo(Integer.toString(minInt))); + assertThat(response.getAllFieldStats().get("integer").getMaxValue(), equalTo(Integer.toString(maxInt))); + assertThat(response.getAllFieldStats().get("long").getMinValue(), equalTo(Long.toString(minLong))); + assertThat(response.getAllFieldStats().get("long").getMaxValue(), equalTo(Long.toString(maxLong))); + assertThat(response.getAllFieldStats().get("float").getMinValue(), equalTo(Float.toString(minFloat))); + assertThat(response.getAllFieldStats().get("float").getMaxValue(), equalTo(Float.toString(maxFloat))); + assertThat(response.getAllFieldStats().get("double").getMinValue(), equalTo(Double.toString(minDouble))); + assertThat(response.getAllFieldStats().get("double").getMaxValue(), equalTo(Double.toString(maxDouble))); + } + + public void testFieldStatsIndexLevel() throws Exception { + assertAcked(prepareCreate("test1").addMapping( + "test", "value", "type=long" + )); + assertAcked(prepareCreate("test2").addMapping( + "test", "value", "type=long" + )); + assertAcked(prepareCreate("test3").addMapping( + "test", "value", "type=long" + )); + ensureGreen("test1", "test2", "test3"); + + indexRange("test1", -10, 100); + indexRange("test2", 101, 200); + indexRange("test3", 201, 300); + + // default: + FieldStatsResponse response = client().prepareFieldStats().setFields("value").get(); + assertAllSuccessful(response); + assertThat(response.getAllFieldStats().get("value").getMinValue(), equalTo(Long.toString(-10))); + assertThat(response.getAllFieldStats().get("value").getMaxValue(), equalTo(Long.toString(300))); + assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1)); + assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMinValue(), equalTo(Long.toString(-10))); + assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMaxValue(), equalTo(Long.toString(300))); + + // Level: cluster + response = client().prepareFieldStats().setFields("value").setLevel("cluster").get(); + assertAllSuccessful(response); + assertThat(response.getAllFieldStats().get("value").getMinValue(), equalTo(Long.toString(-10))); + assertThat(response.getAllFieldStats().get("value").getMaxValue(), equalTo(Long.toString(300))); + assertThat(response.getIndicesMergedFieldStats().size(), equalTo(1)); + assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMinValue(), equalTo(Long.toString(-10))); + assertThat(response.getIndicesMergedFieldStats().get("_all").get("value").getMaxValue(), equalTo(Long.toString(300))); + + // Level: indices + response = client().prepareFieldStats().setFields("value").setLevel("indices").get(); + assertAllSuccessful(response); + assertThat(response.getAllFieldStats(), nullValue()); + assertThat(response.getIndicesMergedFieldStats().size(), equalTo(3)); + assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(Long.toString(-10))); + assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMaxValue(), equalTo(Long.toString(100))); + assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo(Long.toString(101))); + assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMaxValue(), 
equalTo(Long.toString(200))); + assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMinValue(), equalTo(Long.toString(201))); + assertThat(response.getIndicesMergedFieldStats().get("test3").get("value").getMaxValue(), equalTo(Long.toString(300))); + + // Illegal level option: + try { + client().prepareFieldStats().setFields("value").setLevel("illegal").get(); + fail(); + } catch (ActionRequestValidationException e) { + assertThat(e.getMessage(), equalTo("Validation Failed: 1: invalid level option [illegal];")); + } + } + + public void testIncompatibleFieldTypes() { + assertAcked(prepareCreate("test1").addMapping( + "test", "value", "type=long" + )); + assertAcked(prepareCreate("test2").addMapping( + "test", "value", "type=string" + )); + ensureGreen("test1", "test2"); + + client().prepareIndex("test1", "test").setSource("value", 1l).get(); + client().prepareIndex("test1", "test").setSource("value", 2l).get(); + client().prepareIndex("test2", "test").setSource("value", "a").get(); + client().prepareIndex("test2", "test").setSource("value", "b").get(); + refresh(); + + try { + client().prepareFieldStats().setFields("value").get(); + fail(); + } catch (IllegalStateException e){ + assertThat(e.getMessage(), containsString("trying to merge the field stats of field [value]")); + } + + FieldStatsResponse response = client().prepareFieldStats().setFields("value").setLevel("indices").get(); + assertAllSuccessful(response); + assertThat(response.getIndicesMergedFieldStats().size(), equalTo(2)); + assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMinValue(), equalTo(Long.toString(1))); + assertThat(response.getIndicesMergedFieldStats().get("test1").get("value").getMaxValue(), equalTo(Long.toString(2))); + assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMinValue(), equalTo("a")); + assertThat(response.getIndicesMergedFieldStats().get("test2").get("value").getMaxValue(), equalTo("b")); + } + + private void indexRange(String index, long from, long to) throws ExecutionException, InterruptedException { + List requests = new ArrayList<>(); + for (long value = from; value <= to; value++) { + requests.add(client().prepareIndex(index, "test").setSource("value", value)); + } + indexRandom(true, false, requests); + } + +} diff --git a/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java b/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java new file mode 100644 index 00000000000..cd5ececc18a --- /dev/null +++ b/src/test/java/org/elasticsearch/fieldstats/FieldStatsTests.java @@ -0,0 +1,194 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.fieldstats; + +import org.elasticsearch.action.fieldstats.FieldStats; +import org.elasticsearch.action.fieldstats.FieldStatsResponse; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.test.ElasticsearchSingleNodeTest; + +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; + +import static org.hamcrest.Matchers.*; + +/** + */ +public class FieldStatsTests extends ElasticsearchSingleNodeTest { + + public void testByte() { + testNumberRange("field1", "byte", 12, 18); + testNumberRange("field1", "byte", -5, 5); + testNumberRange("field1", "byte", -18, -12); + } + + public void testShort() { + testNumberRange("field1", "short", 256, 266); + testNumberRange("field1", "short", -5, 5); + testNumberRange("field1", "short", -266, -256); + } + + public void testInteger() { + testNumberRange("field1", "integer", 56880, 56890); + testNumberRange("field1", "integer", -5, 5); + testNumberRange("field1", "integer", -56890, -56880); + } + + public void testLong() { + testNumberRange("field1", "long", 312321312312412l, 312321312312422l); + testNumberRange("field1", "long", -5, 5); + testNumberRange("field1", "long", -312321312312422l, -312321312312412l); + } + + public void testString() { + createIndex("test", ImmutableSettings.EMPTY, "field", "value", "type=string"); + for (int value = 0; value <= 10; value++) { + client().prepareIndex("test", "test").setSource("field", String.format(Locale.ENGLISH, "%03d", value)).get(); + } + client().admin().indices().prepareRefresh().get(); + + FieldStatsResponse result = client().prepareFieldStats().setFields("field").get(); + assertThat(result.getAllFieldStats().get("field").getMaxDoc(), equalTo(11l)); + assertThat(result.getAllFieldStats().get("field").getDocCount(), equalTo(11l)); + assertThat(result.getAllFieldStats().get("field").getDensity(), equalTo(100)); + assertThat(result.getAllFieldStats().get("field").getMinValue(), equalTo(String.format(Locale.ENGLISH, "%03d", 0))); + assertThat(result.getAllFieldStats().get("field").getMaxValue(), equalTo(String.format(Locale.ENGLISH, "%03d", 10))); + } + + public void testDouble() { + String fieldName = "field"; + createIndex("test", ImmutableSettings.EMPTY, fieldName, "value", "type=double"); + for (double value = -1; value <= 9; value++) { + client().prepareIndex("test", "test").setSource(fieldName, value).get(); + } + client().admin().indices().prepareRefresh().get(); + + FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get(); + assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(11l)); + assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(11l)); + assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100)); + assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(Double.toString(-1))); + assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(Double.toString(9))); + } + + public void testFloat() { + String fieldName = "field"; + createIndex("test", ImmutableSettings.EMPTY, fieldName, "value", "type=float"); + for (float value = -1; value <= 9; value++) { + client().prepareIndex("test", "test").setSource(fieldName, value).get(); + } + client().admin().indices().prepareRefresh().get(); + + FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get(); + assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(11l)); + 
assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(11l));
+        assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100));
+        assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(Float.toString(-1)));
+        assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(Float.toString(9)));
+    }
+
+    private void testNumberRange(String fieldName, String fieldType, long min, long max) {
+        createIndex("test", ImmutableSettings.EMPTY, fieldName, "value", "type=" + fieldType);
+        for (long value = min; value <= max; value++) {
+            client().prepareIndex("test", "test").setSource(fieldName, value).get();
+        }
+        client().admin().indices().prepareRefresh().get();
+
+        FieldStatsResponse result = client().prepareFieldStats().setFields(fieldName).get();
+        long numDocs = max - min + 1;
+        assertThat(result.getAllFieldStats().get(fieldName).getMaxDoc(), equalTo(numDocs));
+        assertThat(result.getAllFieldStats().get(fieldName).getDocCount(), equalTo(numDocs));
+        assertThat(result.getAllFieldStats().get(fieldName).getDensity(), equalTo(100));
+        assertThat(result.getAllFieldStats().get(fieldName).getMinValue(), equalTo(java.lang.Long.toString(min)));
+        assertThat(result.getAllFieldStats().get(fieldName).getMaxValue(), equalTo(java.lang.Long.toString(max)));
+        client().admin().indices().prepareDelete("test").get();
+    }
+
+    public void testMerge() {
+        List<FieldStats> stats = new ArrayList<>();
+        stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
+        stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
+        stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
+
+        FieldStats stat = new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l);
+        for (FieldStats otherStat : stats) {
+            stat.append(otherStat);
+        }
+        assertThat(stat.getMaxDoc(), equalTo(4l));
+        assertThat(stat.getDocCount(), equalTo(4l));
+        assertThat(stat.getSumDocFreq(), equalTo(4l));
+        assertThat(stat.getSumTotalTermFreq(), equalTo(4l));
+    }
+
+    public void testMerge_notAvailable() {
+        List<FieldStats> stats = new ArrayList<>();
+        stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
+        stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
+        stats.add(new FieldStats.Long(1, 1l, 1l, 1l, 1l, 1l));
+
+        FieldStats stat = new FieldStats.Long(1, -1l, -1l, -1l, 1l, 1l);
+        for (FieldStats otherStat : stats) {
+            stat.append(otherStat);
+        }
+        assertThat(stat.getMaxDoc(), equalTo(4l));
+        assertThat(stat.getDocCount(), equalTo(-1l));
+        assertThat(stat.getSumDocFreq(), equalTo(-1l));
+        assertThat(stat.getSumTotalTermFreq(), equalTo(-1l));
+
+        stats.add(new FieldStats.Long(1, -1l, -1l, -1l, 1l, 1l));
+        stat = stats.remove(0);
+        for (FieldStats otherStat : stats) {
+            stat.append(otherStat);
+        }
+        assertThat(stat.getMaxDoc(), equalTo(4l));
+        assertThat(stat.getDocCount(), equalTo(-1l));
+        assertThat(stat.getSumDocFreq(), equalTo(-1l));
+        assertThat(stat.getSumTotalTermFreq(), equalTo(-1l));
+    }
+
+    public void testInvalidField() {
+        createIndex("test1", ImmutableSettings.EMPTY, "field1", "value", "type=string");
+        client().prepareIndex("test1", "test").setSource("field1", "a").get();
+        client().prepareIndex("test1", "test").setSource("field1", "b").get();
+
+        createIndex("test2", ImmutableSettings.EMPTY, "field2", "value", "type=string");
+        client().prepareIndex("test2", "test").setSource("field2", "a").get();
+        client().prepareIndex("test2", "test").setSource("field2", "b").get();
+        client().admin().indices().prepareRefresh().get();
+
+        FieldStatsResponse result = client().prepareFieldStats().setFields("field1", "field2").get();
+
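+        // field1 is only mapped in test1 and field2 only in test2, so each index's shard fails on the field it does not have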
assertThat(result.getFailedShards(), equalTo(2)); + assertThat(result.getTotalShards(), equalTo(2)); + assertThat(result.getSuccessfulShards(), equalTo(0)); + assertThat(result.getShardFailures()[0].reason(), either(containsString("field [field1] doesn't exist")).or(containsString("field [field2] doesn't exist"))); + assertThat(result.getIndicesMergedFieldStats().size(), equalTo(0)); + + // will only succeed on the 'test2' shard, because there the field does exist + result = client().prepareFieldStats().setFields("field1").get(); + assertThat(result.getFailedShards(), equalTo(1)); + assertThat(result.getTotalShards(), equalTo(2)); + assertThat(result.getSuccessfulShards(), equalTo(1)); + assertThat(result.getShardFailures()[0].reason(), either(containsString("field [field1] doesn't exist")).or(containsString("field [field2] doesn't exist"))); + assertThat(result.getIndicesMergedFieldStats().get("_all").get("field1").getMinValue(), equalTo("a")); + assertThat(result.getIndicesMergedFieldStats().get("_all").get("field1").getMaxValue(), equalTo("b")); + } + +} \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java b/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java index 5581290d0e5..ed160d92b32 100644 --- a/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java +++ b/src/test/java/org/elasticsearch/gateway/MetaDataStateFormatTest.java @@ -21,7 +21,6 @@ package org.elasticsearch.gateway; import com.google.common.collect.Iterators; import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.store.BaseDirectoryWrapper; import org.apache.lucene.store.ChecksumIndexInput; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; @@ -29,9 +28,7 @@ import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.MockDirectoryWrapper; import org.apache.lucene.store.SimpleFSDirectory; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.TestRuleMarkFailure; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MetaData; @@ -44,10 +41,8 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.ElasticsearchTestCase; -import org.junit.Assert; import org.junit.Test; -import java.io.Closeable; import java.io.InputStream; import java.io.IOException; import java.io.OutputStream; @@ -272,7 +267,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase { try { format.loadLatestState(logger, dirs); fail("latest version can not be read"); - } catch (ElasticsearchIllegalStateException ex) { + } catch (IllegalStateException ex) { assertThat(ex.getMessage(), startsWith("Could not find a state file to recover from among ")); } // write the next state file in the new format and ensure it get's a higher ID @@ -538,33 +533,6 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase { } } - // copied from lucene - it's package private - final class CloseableDirectory implements Closeable { - private final BaseDirectoryWrapper dir; - private final TestRuleMarkFailure failureMarker; - - public CloseableDirectory(BaseDirectoryWrapper dir, - TestRuleMarkFailure failureMarker) { - this.dir = dir; - this.failureMarker 
= failureMarker; - } - - @Override - public void close() throws IOException { - // We only attempt to check open/closed state if there were no other test - // failures. - try { - if (failureMarker.wasSuccessful() && dir.isOpen()) { - Assert.fail("Directory not closed: " + dir); - } - } finally { - if (dir.isOpen()) { - dir.close(); - } - } - } - } - public Path[] content(String glob, Path dir) throws IOException { try (DirectoryStream stream = Files.newDirectoryStream(dir, glob)) { return Iterators.toArray(stream.iterator(), Path.class); diff --git a/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesTests.java b/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesTests.java index 00e6fcdf4f1..6e7e986eb93 100644 --- a/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesTests.java +++ b/src/test/java/org/elasticsearch/gateway/RecoverAfterNodesTests.java @@ -51,7 +51,7 @@ public class RecoverAfterNodesTests extends ElasticsearchIntegrationTest { ImmutableSet blocks; do { blocks = nodeClient.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA); + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE); } while (!blocks.isEmpty() && (System.currentTimeMillis() - start) < timeout.millis()); return blocks; @@ -67,17 +67,17 @@ public class RecoverAfterNodesTests extends ElasticsearchIntegrationTest { logger.info("--> start node (1)"); Client clientNode1 = startNode(settingsBuilder().put("gateway.recover_after_nodes", 3)); assertThat(clientNode1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); logger.info("--> start node (2)"); Client clientNode2 = startNode(settingsBuilder().put("gateway.recover_after_nodes", 3)); Thread.sleep(BLOCK_WAIT_TIMEOUT.millis()); assertThat(clientNode1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); assertThat(clientNode2.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); logger.info("--> start node (3)"); @@ -93,28 +93,28 @@ public class RecoverAfterNodesTests extends ElasticsearchIntegrationTest { logger.info("--> start master_node (1)"); Client master1 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", false).put("node.master", true)); assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); logger.info("--> start data_node (1)"); Client data1 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", true).put("node.master", false)); assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); 
assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); logger.info("--> start data_node (2)"); Client data2 = startNode(settingsBuilder().put("gateway.recover_after_master_nodes", 2).put("node.data", true).put("node.master", false)); assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); assertThat(data2.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); logger.info("--> start master_node (2)"); @@ -130,28 +130,28 @@ public class RecoverAfterNodesTests extends ElasticsearchIntegrationTest { logger.info("--> start master_node (1)"); Client master1 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", false).put("node.master", true)); assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); logger.info("--> start data_node (1)"); Client data1 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", true).put("node.master", false)); assertThat(master1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); logger.info("--> start master_node (2)"); Client master2 = startNode(settingsBuilder().put("gateway.recover_after_data_nodes", 2).put("node.data", false).put("node.master", true)); assertThat(master2.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); assertThat(data1.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); assertThat(master2.admin().cluster().prepareState().setLocal(true).execute().actionGet() - .getState().blocks().global(ClusterBlockLevel.METADATA), + .getState().blocks().global(ClusterBlockLevel.METADATA_WRITE), hasItem(GatewayService.STATE_NOT_RECOVERED_BLOCK)); logger.info("--> start data_node (2)"); diff --git 
diff --git a/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java b/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java
index 42b8822a980..44321fad582 100644
--- a/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java
+++ b/src/test/java/org/elasticsearch/gateway/RecoveryFromGatewayTests.java
@@ -35,7 +35,6 @@ import org.elasticsearch.indices.recovery.RecoveryState;
 import org.elasticsearch.test.ElasticsearchIntegrationTest;
 import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
 import org.elasticsearch.test.InternalTestCluster.RestartCallback;
-import org.elasticsearch.test.store.MockDirectoryHelper;
 import org.elasticsearch.test.store.MockFSDirectoryService;
 import org.junit.Test;

@@ -353,7 +352,7 @@ public class RecoveryFromGatewayTests extends ElasticsearchIntegrationTest {
                 .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false)
                 .put("gateway.recover_after_nodes", 4)
                 .put(ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_CONCURRENT_RECOVERIES, 4)
-                .put(MockDirectoryHelper.CRASH_INDEX, false).build();
+                .put(MockFSDirectoryService.CRASH_INDEX, false).build();

         internalCluster().startNodesAsync(4, settings).get();
         // prevent any rebalance actions during the peer recovery
diff --git a/src/test/java/org/elasticsearch/get/GetActionTests.java b/src/test/java/org/elasticsearch/get/GetActionTests.java
index 071cccd4f46..94b83d94424 100644
--- a/src/test/java/org/elasticsearch/get/GetActionTests.java
+++ b/src/test/java/org/elasticsearch/get/GetActionTests.java
@@ -20,12 +20,13 @@
 package org.elasticsearch.get;

 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
+import org.elasticsearch.Version;
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.admin.indices.alias.Alias;
 import org.elasticsearch.action.admin.indices.flush.FlushResponse;
 import org.elasticsearch.action.delete.DeleteResponse;
 import org.elasticsearch.action.get.*;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Base64;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Strings;
@@ -412,7 +413,7 @@ public class GetActionTests extends ElasticsearchIntegrationTest {

         assertAcked(prepareCreate(index)
                 .addMapping(type, mapping)
-                .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1)));
+                .setSettings("index.refresh_interval", -1, IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id));

         client().prepareIndex(index, type, "1")
                 .setSource(jsonBuilder().startObject().field("field", "1", "2").field("excluded", "should not be seen").endObject())
@@ -446,7 +447,7 @@ public class GetActionTests extends ElasticsearchIntegrationTest {

         assertAcked(prepareCreate(index)
                 .addMapping(type, mapping)
-                .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1)));
+                .setSettings("index.refresh_interval", -1, IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id));

         client().prepareIndex(index, type, "1")
                 .setSource(jsonBuilder().startObject().field("field", "1", "2").field("included", "should be seen").endObject())
@@ -482,7 +483,7 @@ public class GetActionTests extends ElasticsearchIntegrationTest {

         assertAcked(prepareCreate(index)
                 .addMapping(type, mapping)
-                .setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1)));
+                .setSettings("index.refresh_interval", -1, IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id));

         client().prepareIndex(index, type, "1")
                 .setSource(jsonBuilder().startObject()
@@ -783,7 +784,7 @@ public class GetActionTests extends ElasticsearchIntegrationTest {
         try {
             client().prepareGet(indexOrAlias(), "my-type1", "1").setFields("field1").get();
             fail();
-        } catch (ElasticsearchIllegalArgumentException e) {
+        } catch (IllegalArgumentException e) {
             //all well
         }

@@ -792,7 +793,7 @@ public class GetActionTests extends ElasticsearchIntegrationTest {
         try {
             client().prepareGet(indexOrAlias(), "my-type1", "1").setFields("field1").get();
             fail();
-        } catch (ElasticsearchIllegalArgumentException e) {
+        } catch (IllegalArgumentException e) {
             //all well
         }
     }
diff --git a/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTest.java b/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTest.java
index 623ce887619..78d03746b93 100644
--- a/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTest.java
+++ b/src/test/java/org/elasticsearch/http/netty/NettyHttpServerPipeliningTest.java
@@ -76,7 +76,7 @@ public class NettyHttpServerPipeliningTest extends ElasticsearchTestCase {
         networkService = new NetworkService(ImmutableSettings.EMPTY);
         threadPool = new ThreadPool("test");
         mockPageCacheRecycler = new MockPageCacheRecycler(ImmutableSettings.EMPTY, threadPool);
-        bigArrays = new MockBigArrays(ImmutableSettings.EMPTY, mockPageCacheRecycler, new NoneCircuitBreakerService());
+        bigArrays = new MockBigArrays(mockPageCacheRecycler, new NoneCircuitBreakerService());
     }

     @After
@@ -137,7 +137,7 @@ public class NettyHttpServerPipeliningTest extends ElasticsearchTestCase {
         }

         @Override
-        public HttpServerTransport stop() throws ElasticsearchException {
+        public HttpServerTransport stop() {
             executorService.shutdownNow();
             return super.stop();
         }
diff --git a/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java b/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java
index 95f95defec2..ecf36582437 100644
--- a/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java
+++ b/src/test/java/org/elasticsearch/index/aliases/IndexAliasesServiceTests.java
@@ -66,8 +66,8 @@ public class IndexAliasesServiceTests extends ElasticsearchSingleNodeTest {
         assertThat(indexAliasesService.hasAlias("dogs"), equalTo(true));
         assertThat(indexAliasesService.hasAlias("turtles"), equalTo(false));

-        assertThat(indexAliasesService.aliasFilter("cats").toString(), equalTo("cache(QueryWrapperFilter(animal:cat))"));
-        assertThat(indexAliasesService.aliasFilter("cats", "dogs").toString(), equalTo("QueryWrapperFilter(cache(QueryWrapperFilter(animal:cat)) cache(QueryWrapperFilter(animal:dog)))"));
+        assertThat(indexAliasesService.aliasFilter("cats").toString(), equalTo("QueryWrapperFilter(animal:cat)"));
+        assertThat(indexAliasesService.aliasFilter("cats", "dogs").toString(), equalTo("QueryWrapperFilter(QueryWrapperFilter(animal:cat) QueryWrapperFilter(animal:dog))"));

         // Non-filtering alias should turn off all filters because filters are ORed
         assertThat(indexAliasesService.aliasFilter("all"), nullValue());
@@ -76,7 +76,7 @@ public class IndexAliasesServiceTests extends ElasticsearchSingleNodeTest {
         indexAliasesService.add("cats", filter(termFilter("animal", "feline")));
         indexAliasesService.add("dogs", filter(termFilter("animal", "canine")));

-        assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(cache(QueryWrapperFilter(animal:canine)) cache(QueryWrapperFilter(animal:feline)))"));
+        assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(QueryWrapperFilter(animal:canine) QueryWrapperFilter(animal:feline))"));
assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(QueryWrapperFilter(animal:canine) QueryWrapperFilter(animal:feline))")); } @Test @@ -86,13 +86,13 @@ public class IndexAliasesServiceTests extends ElasticsearchSingleNodeTest { indexAliasesService.add("dogs", filter(termFilter("animal", "dog"))); assertThat(indexAliasesService.aliasFilter(), nullValue()); - assertThat(indexAliasesService.aliasFilter("dogs").toString(), equalTo("cache(QueryWrapperFilter(animal:dog))")); - assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(cache(QueryWrapperFilter(animal:dog)) cache(QueryWrapperFilter(animal:cat)))")); + assertThat(indexAliasesService.aliasFilter("dogs").toString(), equalTo("QueryWrapperFilter(animal:dog)")); + assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(QueryWrapperFilter(animal:dog) QueryWrapperFilter(animal:cat))")); indexAliasesService.add("cats", filter(termFilter("animal", "feline"))); indexAliasesService.add("dogs", filter(termFilter("animal", "canine"))); - assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(cache(QueryWrapperFilter(animal:canine)) cache(QueryWrapperFilter(animal:feline)))")); + assertThat(indexAliasesService.aliasFilter("dogs", "cats").toString(), equalTo("QueryWrapperFilter(QueryWrapperFilter(animal:canine) QueryWrapperFilter(animal:feline))")); } @Test(expected = InvalidAliasNameException.class) diff --git a/src/test/java/org/elasticsearch/index/analysis/AnalysisFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/AnalysisFactoryTests.java index 500834fbee1..eac199db7ed 100644 --- a/src/test/java/org/elasticsearch/index/analysis/AnalysisFactoryTests.java +++ b/src/test/java/org/elasticsearch/index/analysis/AnalysisFactoryTests.java @@ -164,6 +164,8 @@ public class AnalysisFactoryTests extends ElasticsearchTestCase { put("hyphenatedwords", Void.class); // repeats anything marked as keyword put("keywordrepeat", Void.class); + // like limittokencount, but by offset + put("limittokenoffset", Void.class); // like limittokencount, but by position put("limittokenposition", Void.class); // ??? 
diff --git a/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java
index cd4ababaaf4..78f2bd5077c 100644
--- a/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java
+++ b/src/test/java/org/elasticsearch/index/analysis/KeepFilterFactoryTests.java
@@ -21,7 +21,6 @@ package org.elasticsearch.index.analysis;

 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.FailedToResolveConfigException;
@@ -57,7 +56,7 @@ public class KeepFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
             AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
             Assert.fail("path and array are configured");
         } catch (Exception e) {
-            assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class));
+            assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
         }
     }

@@ -83,7 +82,7 @@ public class KeepFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
             AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
             fail("expected an exception indicating that you can't use [keep_words_path] with [keep_words] ");
         } catch (Throwable e) {
-            assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class));
+            assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
         }
     }

diff --git a/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java
index 669613b4ebe..7b66c4f41e6 100644
--- a/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java
+++ b/src/test/java/org/elasticsearch/index/analysis/NGramTokenizerFactoryTests.java
@@ -24,7 +24,6 @@ import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.ngram.*;
 import org.apache.lucene.analysis.reverse.ReverseStringFilter;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.settings.ImmutableSettings.Builder;
@@ -58,7 +57,7 @@ public class NGramTokenizerFactoryTests extends ElasticsearchTokenStreamTestCase {
             try {
                 new NGramTokenizerFactory(index, indexSettings, name, settings).create();
                 fail();
-            } catch (ElasticsearchIllegalArgumentException expected) {
+            } catch (IllegalArgumentException expected) {
                 // OK
             }
         }
@@ -153,7 +152,7 @@ public class NGramTokenizerFactoryTests extends ElasticsearchTokenStreamTestCase {
         try {
             new EdgeNGramTokenizerFactory(index, indexSettings, name, settings).create();
             fail("should fail side:back is not supported anymore");
-        } catch (ElasticsearchIllegalArgumentException ex) {
+        } catch (IllegalArgumentException ex) {
         }
     }
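A recurring change in these analysis tests is that factories now throw the plain JDK IllegalArgumentException instead of the removed ElasticsearchIllegalArgumentException. A minimal sketch of the new expectation, reusing names from the tests above:

    // Sketch only: invalid analysis settings now surface as java.lang.IllegalArgumentException.
    try {
        new NGramTokenizerFactory(index, indexSettings, name, settings).create();
        fail("invalid settings should have been rejected");
    } catch (IllegalArgumentException expected) {
        // the Elasticsearch-specific exception type is gone
    }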
diff --git a/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java b/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java
index 09a3478d752..996471a205c 100644
--- a/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java
+++ b/src/test/java/org/elasticsearch/index/analysis/PatternCaptureTokenFilterTests.java
@@ -19,12 +19,10 @@

 package org.elasticsearch.index.analysis;

-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.inject.Injector;
 import org.elasticsearch.common.inject.ModulesBuilder;
-import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsModule;
 import org.elasticsearch.env.Environment;
@@ -68,7 +66,7 @@ public class PatternCaptureTokenFilterTests extends ElasticsearchTokenStreamTestCase {

     }

-    @Test(expected=ElasticsearchIllegalArgumentException.class)
+    @Test(expected=IllegalArgumentException.class)
     public void testNoPatterns() {
         new PatternCaptureGroupTokenFilterFactory(new Index("test"), settingsBuilder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).build(), "pattern_capture", settingsBuilder().put("pattern", "foobar").build());
     }
diff --git a/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java b/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java
index 9947a9bd36d..2792f0c4150 100644
--- a/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java
+++ b/src/test/java/org/elasticsearch/index/analysis/commongrams/CommonGramsTokenFilterFactoryTests.java
@@ -22,7 +22,6 @@ package org.elasticsearch.index.analysis.commongrams;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.Tokenizer;
 import org.apache.lucene.analysis.core.WhitespaceTokenizer;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.analysis.AnalysisService;
@@ -46,7 +45,7 @@ public class CommonGramsTokenFilterFactoryTests extends ElasticsearchTokenStreamTestCase {
             AnalysisTestsHelper.createAnalysisServiceFromSettings(settings);
             Assert.fail("[common_words] or [common_words_path] is set");
         } catch (Exception e) {
-            assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class));
+            assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
         }
     }

     @Test
diff --git a/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTest.java b/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTest.java
index 72b0134b4ca..a57e81ff7f3 100644
--- a/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTest.java
+++ b/src/test/java/org/elasticsearch/index/cache/bitset/BitSetFilterCacheTest.java
@@ -31,11 +31,11 @@ import org.apache.lucene.index.LogByteSizeMergePolicy;
 import org.apache.lucene.index.Term;
 import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.search.join.BitDocIdSetFilter;
 import org.apache.lucene.store.RAMDirectory;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.test.ElasticsearchTestCase;
@@ -72,7 +72,7 @@ public class BitSetFilterCacheTest extends ElasticsearchTestCase {
         IndexSearcher searcher = new IndexSearcher(reader);

         BitsetFilterCache cache = new BitsetFilterCache(new Index("test"), ImmutableSettings.EMPTY);
-        BitDocIdSetFilter filter = cache.getBitDocIdSetFilter(Queries.wrap(new TermQuery(new Term("field", "value"))));
+        BitDocIdSetFilter filter = cache.getBitDocIdSetFilter(new QueryWrapperFilter(new TermQuery(new Term("field", "value"))));

         TopDocs docs = searcher.search(new ConstantScoreQuery(filter), 1);
         assertThat(docs.totalHits, equalTo(3));
diff --git a/src/test/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCacheTest.java b/src/test/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCacheTest.java
deleted file mode 100644
index 437ed34366b..00000000000
--- a/src/test/java/org/elasticsearch/index/cache/query/parser/resident/ResidentQueryParserCacheTest.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.cache.query.parser.resident;
-
-import org.apache.lucene.index.Term;
-import org.apache.lucene.queryparser.classic.QueryParserSettings;
-import org.apache.lucene.search.Query;
-import org.apache.lucene.search.TermQuery;
-import org.elasticsearch.common.settings.ImmutableSettings;
-import org.elasticsearch.index.Index;
-import org.elasticsearch.test.ElasticsearchTestCase;
-import org.junit.Test;
-
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.not;
-import static org.hamcrest.Matchers.sameInstance;
-
-/**
- */
-public class ResidentQueryParserCacheTest extends ElasticsearchTestCase {
-
-    @Test
-    public void testCaching() throws Exception {
-        ResidentQueryParserCache cache = new ResidentQueryParserCache(new Index("test"), ImmutableSettings.EMPTY);
-        QueryParserSettings key = new QueryParserSettings();
-        key.queryString("abc");
-        key.defaultField("a");
-        key.boost(2.0f);
-
-        Query query = new TermQuery(new Term("a", "abc"));
-        cache.put(key, query);
-
-        assertThat(cache.get(key), not(sameInstance(query)));
-        assertThat(cache.get(key), equalTo(query));
-    }
-
-}
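The BitSetFilterCacheTest hunk above swaps the removed Queries.wrap(...) helper for Lucene's QueryWrapperFilter. A minimal sketch, assuming the cache API is otherwise unchanged:

    // Sketch only: wrap an arbitrary Query directly in QueryWrapperFilter.
    BitDocIdSetFilter filter = cache.getBitDocIdSetFilter(
            new QueryWrapperFilter(new TermQuery(new Term("field", "value"))));
    TopDocs docs = searcher.search(new ConstantScoreQuery(filter), 1);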
diff --git a/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java b/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java
index 8d3e7280c26..1dd88a83a1e 100644
--- a/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java
+++ b/src/test/java/org/elasticsearch/index/engine/InternalEngineSettingsTest.java
@@ -19,7 +19,6 @@
 package org.elasticsearch.index.engine;

 import org.apache.lucene.index.LiveIndexWriterConfig;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexService;
@@ -108,7 +107,7 @@ public class InternalEngineSettingsTest extends ElasticsearchSingleNodeTest {
         try {
             client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get();
             fail("settings update didn't fail, but should have");
-        } catch (ElasticsearchIllegalArgumentException e) {
+        } catch (IllegalArgumentException e) {
             // good
         }

@@ -118,7 +117,7 @@ public class InternalEngineSettingsTest extends ElasticsearchSingleNodeTest {
         try {
             client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get();
             fail("settings update didn't fail, but should have");
-        } catch (ElasticsearchIllegalArgumentException e) {
+        } catch (IllegalArgumentException e) {
             // good
         }

@@ -128,7 +127,7 @@ public class InternalEngineSettingsTest extends ElasticsearchSingleNodeTest {
         try {
             client().admin().indices().prepareUpdateSettings("foo").setSettings(settings).get();
             fail("settings update didn't fail, but should have");
-        } catch (ElasticsearchIllegalArgumentException e) {
+        } catch (IllegalArgumentException e) {
             // good
         }
     }
diff --git a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index ccc84c2caf7..1c86db6b451 100644
--- a/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -19,7 +19,10 @@

 package org.elasticsearch.index.engine;

+import com.carrotsearch.randomizedtesting.annotations.Repeat;
+import com.carrotsearch.randomizedtesting.annotations.Seed;
 import com.google.common.collect.ImmutableMap;
+
 import org.apache.log4j.AppenderSkeleton;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
@@ -29,7 +32,14 @@ import org.apache.lucene.codecs.Codec;
 import org.apache.lucene.document.Field;
 import org.apache.lucene.document.NumericDocValuesField;
 import org.apache.lucene.document.TextField;
-import org.apache.lucene.index.*;
+import org.apache.lucene.index.CorruptIndexException;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexDeletionPolicy;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.index.LiveIndexWriterConfig;
+import org.apache.lucene.index.Term;
+import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.MatchAllDocsQuery;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
@@ -258,7 +268,7 @@ public class InternalEngineTests extends ElasticsearchTestCase {
             public void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t) {
                 // we don't need to notify anybody in this test
             }
-        }, new TranslogHandler(shardId.index().getName()));
+        }, new TranslogHandler(shardId.index().getName()), IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy());

         return config;

@@ -1372,34 +1382,29 @@ public class InternalEngineTests extends ElasticsearchTestCase {
     public void testBasicCreatedFlag() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index index = new Engine.Index(null, newUid("1"), doc);
-        engine.index(index);
-        assertTrue(index.created());
+        assertTrue(engine.index(index));

         index = new Engine.Index(null, newUid("1"), doc);
-        engine.index(index);
-        assertFalse(index.created());
+        assertFalse(engine.index(index));

         engine.delete(new Engine.Delete(null, "1", newUid("1")));

         index = new Engine.Index(null, newUid("1"), doc);
-        engine.index(index);
-        assertTrue(index.created());
+        assertTrue(engine.index(index));
     }

     @Test
    public void testCreatedFlagAfterFlush() {
         ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, testDocument(), B_1, null);
         Engine.Index index = new Engine.Index(null, newUid("1"), doc);
-        engine.index(index);
-        assertTrue(index.created());
+        assertTrue(engine.index(index));

         engine.delete(new Engine.Delete(null, "1", newUid("1")));

         engine.flush();

         index = new Engine.Index(null, newUid("1"), doc);
-        engine.index(index);
-        assertTrue(index.created());
+        assertTrue(engine.index(index));
     }

     private static class MockAppender extends AppenderSkeleton {
@@ -1780,6 +1785,7 @@ public class InternalEngineTests extends ElasticsearchTestCase {
             assertThat(topDocs.totalHits, equalTo(numDocs));
         }
         engine.close();
+        boolean recoveredButFailed = false;
         final MockDirectoryWrapper directory = DirectoryUtils.getLeaf(store.directory(), MockDirectoryWrapper.class);
         if (directory != null) {
             // since we rollback the IW we are writing the same segment files again after starting IW but MDW prevents
@@ -1797,7 +1803,16 @@ public class InternalEngineTests extends ElasticsearchTestCase {
                     started = true;
                     break;
                 } catch (EngineCreationFailureException ex) {
-                    // skip
+                    // sometimes we fail after we committed the recovered docs during the final refresh call
+                    // that means the index is consistent and recovered so we can't assert on the num recovered ops below.
+                    try (IndexReader reader = DirectoryReader.open(directory.getDelegate())) {
+                        if (reader.numDocs() == numDocs) {
+                            recoveredButFailed = true;
+                            break;
+                        } else {
+                            // skip - we just failed
+                        }
+                    }
                 }
             }

@@ -1816,8 +1831,10 @@ public class InternalEngineTests extends ElasticsearchTestCase {
             TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), randomIntBetween(numDocs, numDocs + 10));
             assertThat(topDocs.totalHits, equalTo(numDocs));
         }
-        TranslogHandler parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer();
-        assertEquals(numDocs, parser.recoveredOps.get());
+        if (recoveredButFailed == false) {
+            TranslogHandler parser = (TranslogHandler) engine.config().getTranslogRecoveryPerformer();
+            assertEquals(numDocs, parser.recoveredOps.get());
+        }
     }

     @Test
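The testBasicCreatedFlag/testCreatedFlagAfterFlush hunks above track an API change: Engine#index(...) now returns the "created" flag directly rather than exposing it on the operation afterwards. A minimal sketch of the new call shape, reusing names from the tests:

    // Sketch only: index(...) returns true when the doc was newly created,
    // false when an existing doc was overwritten.
    Engine.Index op = new Engine.Index(null, newUid("1"), doc);
    boolean created = engine.index(op);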
diff --git a/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java b/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java
index e2acb3e635d..69ae60591a6 100644
--- a/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java
+++ b/src/test/java/org/elasticsearch/index/engine/ShadowEngineTests.java
@@ -27,9 +27,11 @@ import org.apache.lucene.index.IndexDeletionPolicy;
 import org.apache.lucene.index.IndexWriterConfig;
 import org.apache.lucene.index.LiveIndexWriterConfig;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.MockDirectoryWrapper;
+import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.Nullable;
@@ -73,6 +75,8 @@ import java.io.IOException;
 import java.nio.file.Path;
 import java.util.Arrays;
 import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicBoolean;

 import static org.elasticsearch.common.settings.ImmutableSettings.Builder.EMPTY_SETTINGS;
 import static org.hamcrest.Matchers.*;
@@ -249,7 +253,7 @@ public class ShadowEngineTests extends ElasticsearchTestCase {
             public void onFailedEngine(ShardId shardId, String reason, @Nullable Throwable t) {
                 // we don't need to notify anybody in this test
             }
-        }, null);
+        }, null, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy());

         return config;

@@ -939,4 +943,56 @@ public class ShadowEngineTests extends ElasticsearchTestCase {
         assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName());
         assertEquals(replicaEngine.config().getIndexConcurrency(), indexConcurrency);
     }
+
+    @Test
+    public void testShadowEngineCreationRetry() throws Exception {
+        final Path srDir = createTempDir();
+        final Store srStore = createStore(srDir);
+        Lucene.cleanLuceneIndex(srStore.directory());
+        final Translog srTranslog = createTranslogReplica();
+
+        final AtomicBoolean succeeded = new AtomicBoolean(false);
+        final CountDownLatch latch = new CountDownLatch(1);
+
+        // Create a shadow Engine, which will freak out because there is no
+        // index yet
+        Thread t = new Thread(new Runnable() {
+            @Override
+            public void run() {
+                try {
+                    latch.await();
+                } catch (InterruptedException e) {
+                    // ignore interruptions
+                }
+                try (ShadowEngine srEngine = createShadowEngine(srStore, srTranslog)) {
+                    succeeded.set(true);
+                } catch (Exception e) {
+                    fail("should have been able to create the engine!");
+                }
+            }
+        });
+        t.start();
+
+        // count down latch
+        // now shadow engine should try to be created
+        latch.countDown();
+
+        // Create an InternalEngine, which creates the index so the shadow
+        // replica will handle it correctly
+        Store pStore = createStore(srDir);
+        Translog pTranslog = createTranslog();
+        InternalEngine pEngine = createInternalEngine(pStore, pTranslog);
+
+        // create a document
+        ParseContext.Document document = testDocumentWithTextField();
+        document.add(new Field(SourceFieldMapper.NAME, B_1.toBytes(), SourceFieldMapper.Defaults.FIELD_TYPE));
+        ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, document, B_1, null);
+        pEngine.create(new Engine.Create(null, newUid("1"), doc));
+        pEngine.flush(true, true);
+
+        t.join();
+        assertTrue("ShadowEngine should have been able to be created", succeeded.get());
+        // (shadow engine is already shut down in the try-with-resources)
+        IOUtils.close(srTranslog, srStore, pTranslog, pEngine, pStore);
+    }
 }
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingIntegrationTests.java b/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIntegrationTests.java
similarity index 98%
rename from src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingIntegrationTests.java
rename to src/test/java/org/elasticsearch/index/mapper/DynamicMappingIntegrationTests.java
index 36fd5dc0348..32da43da403 100644
--- a/src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingIntegrationTests.java
+++ b/src/test/java/org/elasticsearch/index/mapper/DynamicMappingIntegrationTests.java
@@ -16,13 +16,12 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.index.mapper.dynamic;
+package org.elasticsearch.index.mapper;

 import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.cluster.metadata.MappingMetaData;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
-import org.elasticsearch.index.mapper.MapperParsingException;
 import org.elasticsearch.test.ElasticsearchIntegrationTest;

 import java.io.IOException;
diff --git a/src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java
similarity index 96%
rename from src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingTests.java
rename to src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java
index f4a8a59e98b..19994257d0d 100644
--- a/src/test/java/org/elasticsearch/index/mapper/dynamic/DynamicMappingTests.java
+++ b/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java
@@ -16,7 +16,7 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.index.mapper.dynamic;
+package org.elasticsearch.index.mapper;

 import com.google.common.collect.ImmutableMap;

@@ -31,15 +31,6 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.mapper.ContentPath;
-import org.elasticsearch.index.mapper.DocumentMapper;
-import org.elasticsearch.index.mapper.DocumentMapperParser;
-import org.elasticsearch.index.mapper.FieldMappers;
-import org.elasticsearch.index.mapper.Mapper;
-import org.elasticsearch.index.mapper.ParseContext;
-import org.elasticsearch.index.mapper.ParsedDocument;
-import org.elasticsearch.index.mapper.SourceToParse;
-import org.elasticsearch.index.mapper.StrictDynamicMappingException;
 import org.elasticsearch.test.ElasticsearchSingleNodeTest;

 import java.io.IOException;
@@ -210,7 +201,7 @@ public class DynamicMappingTests extends ElasticsearchSingleNodeTest {
         ctx.reset(XContentHelper.createParser(source.source()), new ParseContext.Document(), source, null);
         assertEquals(XContentParser.Token.START_OBJECT, ctx.parser().nextToken());
         ctx.parser().nextToken();
-        return mapper.root().parse(ctx);
+        return DocumentParser.parseObject(ctx, mapper.root());
     }

     public void testDynamicMappingsNotNeeded() throws Exception {
diff --git a/src/test/java/org/elasticsearch/index/mapper/FieldMappersLookupTests.java b/src/test/java/org/elasticsearch/index/mapper/FieldMappersLookupTests.java
index c915a3cf8db..75c4bd51a87 100644
--- a/src/test/java/org/elasticsearch/index/mapper/FieldMappersLookupTests.java
+++ b/src/test/java/org/elasticsearch/index/mapper/FieldMappersLookupTests.java
@@ -57,7 +57,7 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase {
     public void testNewField() {
         FieldMappersLookup lookup = new FieldMappersLookup();
         FakeFieldMapper f = new FakeFieldMapper("foo", "bar");
-        FieldMappersLookup lookup2 = lookup.copyAndAddAll(Lists.newArrayList(f));
+        FieldMappersLookup lookup2 = lookup.copyAndAddAll(newList(f));

         assertNull(lookup.fullName("foo"));
         assertNull(lookup.indexName("bar"));
@@ -76,9 +76,9 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase {
         FieldMappersLookup lookup = new FieldMappersLookup();
         FakeFieldMapper f = new FakeFieldMapper("foo", "bar");
         FakeFieldMapper other = new FakeFieldMapper("blah", "blah");
-        lookup = lookup.copyAndAddAll(Lists.newArrayList(f, other));
+        lookup = lookup.copyAndAddAll(newList(f, other));
         FakeFieldMapper f2 = new FakeFieldMapper("foo", "bar");
-        FieldMappersLookup lookup2 = lookup.copyAndAddAll(Lists.newArrayList(f2));
+        FieldMappersLookup lookup2 = lookup.copyAndAddAll(newList(f2));

         FieldMappers mappers = lookup2.fullName("foo");
         assertNotNull(mappers);
@@ -93,7 +93,7 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase {
     public void testIndexName() {
         FakeFieldMapper f1 = new FakeFieldMapper("foo", "foo");
         FieldMappersLookup lookup = new FieldMappersLookup();
-        lookup = lookup.copyAndAddAll(Lists.newArrayList(f1));
+        lookup = lookup.copyAndAddAll(newList(f1));

         FieldMappers mappers = lookup.indexName("foo");
         assertNotNull(mappers);
@@ -105,7 +105,7 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase {
         FakeFieldMapper f1 = new FakeFieldMapper("foo", "baz");
         FakeFieldMapper f2 = new FakeFieldMapper("bar", "boo");
         FieldMappersLookup lookup = new FieldMappersLookup();
-        lookup = lookup.copyAndAddAll(Lists.newArrayList(f1, f2));
+        lookup = lookup.copyAndAddAll(newList(f1, f2));
         List<String> names = lookup.simpleMatchToIndexNames("b*");
         assertTrue(names.contains("baz"));
         assertTrue(names.contains("boo"));
@@ -115,7 +115,7 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase {
         FakeFieldMapper f1 = new FakeFieldMapper("foo", "baz");
         FakeFieldMapper f2 = new FakeFieldMapper("bar", "boo");
         FieldMappersLookup lookup = new FieldMappersLookup();
-        lookup = lookup.copyAndAddAll(Lists.newArrayList(f1, f2));
+        lookup = lookup.copyAndAddAll(newList(f1, f2));
         List<String> names = lookup.simpleMatchToFullName("b*");
         assertTrue(names.contains("foo"));
         assertTrue(names.contains("bar"));
@@ -126,7 +126,7 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase {
         FakeFieldMapper f2 = new FakeFieldMapper("foo", "realbar");
         FakeFieldMapper f3 = new FakeFieldMapper("baz", "realfoo");
         FieldMappersLookup lookup = new FieldMappersLookup();
-        lookup = lookup.copyAndAddAll(Lists.newArrayList(f1, f2, f3));
+        lookup = lookup.copyAndAddAll(newList(f1, f2, f3));

         assertNotNull(lookup.smartName("foo"));
         assertEquals(2, lookup.smartName("foo").mappers().size());
@@ -138,7 +138,7 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase {
     public void testIteratorImmutable() {
         FakeFieldMapper f1 = new FakeFieldMapper("foo", "bar");
         FieldMappersLookup lookup = new FieldMappersLookup();
-        lookup = lookup.copyAndAddAll(Lists.newArrayList(f1));
+        lookup = lookup.copyAndAddAll(newList(f1));

         try {
             Iterator<FieldMapper<?>> itr = lookup.iterator();
@@ -154,12 +154,12 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase {
     public void testGetMapper() {
         FakeFieldMapper f1 = new FakeFieldMapper("foo", "bar");
         FieldMappersLookup lookup = new FieldMappersLookup();
-        lookup = lookup.copyAndAddAll(Lists.newArrayList(f1));
+        lookup = lookup.copyAndAddAll(newList(f1));

         assertEquals(f1, lookup.get("foo"));
         assertNull(lookup.get("bar")); // get is only by full name
         FakeFieldMapper f2 = new FakeFieldMapper("foo", "foo");
-        lookup = lookup.copyAndAddAll(Lists.newArrayList(f2));
+        lookup = lookup.copyAndAddAll(newList(f2));
         try {
             lookup.get("foo");
             fail("get should have enforced foo is unique");
@@ -168,6 +168,10 @@ public class FieldMappersLookupTests extends ElasticsearchTestCase {
         }
     }

+    static List<FieldMapper<?>> newList(FieldMapper<?>... mapper) {
+        return Lists.newArrayList(mapper);
+    }
+
     // this sucks how much must be overriden just do get a dummy field mapper...
     static class FakeFieldMapper extends AbstractFieldMapper {
         static Settings dummySettings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT.id).build();
diff --git a/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java
index fb5918373cc..19c25e07450 100644
--- a/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java
+++ b/src/test/java/org/elasticsearch/index/mapper/copyto/CopyToMapperTests.java
@@ -32,6 +32,7 @@ import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.DocumentMapperParser;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.ParseContext.Document;
 import org.elasticsearch.index.mapper.ParsedDocument;
@@ -45,7 +46,6 @@ import java.util.List;
 import java.util.Map;

 import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
-import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.is;
@@ -229,11 +229,11 @@ public class CopyToMapperTests extends ElasticsearchSingleNodeTest {

         DocumentMapper docMapperAfter = parser.parse(mappingAfter);

-        DocumentMapper.MergeResult mergeResult = docMapperBefore.merge(docMapperAfter.mapping(), mergeFlags().simulate(true));
+        MergeResult mergeResult = docMapperBefore.merge(docMapperAfter.mapping(), true);

-        assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false));
+        assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false));

-        docMapperBefore.merge(docMapperAfter.mapping(), mergeFlags().simulate(false));
+        docMapperBefore.merge(docMapperAfter.mapping(), false);

         fields = docMapperBefore.mappers().getMapper("copy_test").copyTo().copyToFields();

diff --git a/src/test/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapperTests.java
new file mode 100644
index 00000000000..fd502a04ed9
--- /dev/null
+++ b/src/test/java/org/elasticsearch/index/mapper/core/Murmur3FieldMapperTests.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.mapper.core;
+
+import org.apache.lucene.index.IndexOptions;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.IndexService;
+import org.elasticsearch.index.mapper.DocumentMapper;
+import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.Mapper;
+import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.test.ElasticsearchSingleNodeTest;
+import org.junit.Before;
+
+public class Murmur3FieldMapperTests extends ElasticsearchSingleNodeTest {
+
+    IndexService indexService;
+    DocumentMapperParser parser;
+
+    @Before
+    public void before() {
+        indexService = createIndex("test");
+        parser = indexService.mapperService().documentMapperParser();
+    }
+
+    public void testDocValuesSettingNotAllowed() throws Exception {
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+            .startObject("properties").startObject("field")
+                .field("type", "murmur3")
+                .field("doc_values", false)
+            .endObject().endObject().endObject().endObject().string();
+        try {
+            parser.parse(mapping);
+            fail("expected a mapper parsing exception");
+        } catch (MapperParsingException e) {
+            assertTrue(e.getMessage().contains("Setting [doc_values] cannot be modified"));
+        }
+
+        // even setting to the default is not allowed, the setting is invalid
+        mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+            .startObject("properties").startObject("field")
+                .field("type", "murmur3")
+                .field("doc_values", true)
+            .endObject().endObject().endObject().endObject().string();
+        try {
+            parser.parse(mapping);
+            fail("expected a mapper parsing exception");
+        } catch (MapperParsingException e) {
+            assertTrue(e.getMessage().contains("Setting [doc_values] cannot be modified"));
+        }
+    }
+
+    public void testIndexSettingNotAllowed() throws Exception {
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+            .startObject("properties").startObject("field")
+                .field("type", "murmur3")
+                .field("index", "not_analyzed")
+            .endObject().endObject().endObject().endObject().string();
+        try {
+            parser.parse(mapping);
+            fail("expected a mapper parsing exception");
+        } catch (MapperParsingException e) {
+            assertTrue(e.getMessage().contains("Setting [index] cannot be modified"));
+        }
+
+        // even setting to the default is not allowed, the setting is invalid
+        mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+            .startObject("properties").startObject("field")
+                .field("type", "murmur3")
+                .field("index", "no")
+            .endObject().endObject().endObject().endObject().string();
+        try {
+            parser.parse(mapping);
+            fail("expected a mapper parsing exception");
+        } catch (MapperParsingException e) {
+            assertTrue(e.getMessage().contains("Setting [index] cannot be modified"));
+        }
+    }
+
+    public void testDocValuesSettingBackcompat() throws Exception {
+        Settings settings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build();
+        indexService = createIndex("test_bwc", settings);
+        parser = indexService.mapperService().documentMapperParser();
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type")
+            .startObject("properties").startObject("field")
"murmur3") + .field("doc_values", false) + .endObject().endObject().endObject().endObject().string(); + + DocumentMapper docMapper = parser.parse(mapping); + Murmur3FieldMapper mapper = (Murmur3FieldMapper)docMapper.mappers().getMapper("field"); + assertFalse(mapper.hasDocValues()); + } + + public void testIndexSettingBackcompat() throws Exception { + Settings settings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); + indexService = createIndex("test_bwc", settings); + parser = indexService.mapperService().documentMapperParser(); + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("properties").startObject("field") + .field("type", "murmur3") + .field("index", "not_analyzed") + .endObject().endObject().endObject().endObject().string(); + + DocumentMapper docMapper = parser.parse(mapping); + Murmur3FieldMapper mapper = (Murmur3FieldMapper)docMapper.mappers().getMapper("field"); + assertEquals(IndexOptions.DOCS, mapper.fieldType().indexOptions()); + } + + // TODO: add more tests +} diff --git a/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java index c8760ddd561..ffbe671580f 100644 --- a/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java @@ -67,7 +67,7 @@ public class TokenCountFieldMapperIntegrationTests extends ElasticsearchIntegrat * It is possible to get the token count in a search response. */ @Test - public void searchReturnsTokenCount() throws ElasticsearchException, IOException { + public void searchReturnsTokenCount() throws IOException { init(); assertSearchReturns(searchById("single"), "single"); @@ -82,7 +82,7 @@ public class TokenCountFieldMapperIntegrationTests extends ElasticsearchIntegrat * It is possible to search by token count. */ @Test - public void searchByTokenCount() throws ElasticsearchException, IOException { + public void searchByTokenCount() throws IOException { init(); assertSearchReturns(searchByNumericRange(4, 4).get(), "single"); @@ -96,7 +96,7 @@ public class TokenCountFieldMapperIntegrationTests extends ElasticsearchIntegrat * It is possible to search by token count. 
diff --git a/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java
index c8760ddd561..ffbe671580f 100644
--- a/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java
+++ b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperIntegrationTests.java
@@ -67,7 +67,7 @@ public class TokenCountFieldMapperIntegrationTests extends ElasticsearchIntegrationTest {
      * It is possible to get the token count in a search response.
      */
     @Test
-    public void searchReturnsTokenCount() throws ElasticsearchException, IOException {
+    public void searchReturnsTokenCount() throws IOException {
         init();

         assertSearchReturns(searchById("single"), "single");
@@ -82,7 +82,7 @@ public class TokenCountFieldMapperIntegrationTests extends ElasticsearchIntegrationTest {
      * It is possible to search by token count.
      */
     @Test
-    public void searchByTokenCount() throws ElasticsearchException, IOException {
+    public void searchByTokenCount() throws IOException {
         init();

         assertSearchReturns(searchByNumericRange(4, 4).get(), "single");
@@ -96,7 +96,7 @@ public class TokenCountFieldMapperIntegrationTests extends ElasticsearchIntegrationTest {
      * It is possible to search by token count.
      */
     @Test
-    public void facetByTokenCount() throws ElasticsearchException, IOException {
+    public void facetByTokenCount() throws IOException {
         init();

         String facetField = randomFrom(ImmutableList.of(
@@ -109,7 +109,7 @@ public class TokenCountFieldMapperIntegrationTests extends ElasticsearchIntegrationTest {
         assertThat(terms.getBuckets().size(), equalTo(9));
     }

-    private void init() throws ElasticsearchException, IOException {
+    private void init() throws IOException {
         prepareCreate("test").addMapping("test", jsonBuilder().startObject()
                 .startObject("test")
                 .startObject("properties")
diff --git a/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java
index 800a47d9869..ae1aeccae91 100644
--- a/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java
+++ b/src/test/java/org/elasticsearch/index/mapper/core/TokenCountFieldMapperTests.java
@@ -25,6 +25,7 @@ import org.apache.lucene.analysis.TokenStream;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.DocumentMapperParser;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.test.ElasticsearchSingleNodeTest;
 import org.junit.Test;

@@ -32,7 +33,6 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collections;

-import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags;
 import static org.hamcrest.Matchers.equalTo;

 /**
@@ -64,12 +64,12 @@ public class TokenCountFieldMapperTests extends ElasticsearchSingleNodeTest {
                 .endObject().endObject().string();
         DocumentMapper stage2 = parser.parse(stage2Mapping);

-        DocumentMapper.MergeResult mergeResult = stage1.merge(stage2.mapping(), mergeFlags().simulate(true));
+        MergeResult mergeResult = stage1.merge(stage2.mapping(), true);
         assertThat(mergeResult.hasConflicts(), equalTo(false));
         // Just simulated so merge hasn't happened yet
         assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("keyword"));

-        mergeResult = stage1.merge(stage2.mapping(), mergeFlags().simulate(false));
+        mergeResult = stage1.merge(stage2.mapping(), false);
         assertThat(mergeResult.hasConflicts(), equalTo(false));
         // Just simulated so merge hasn't happened yet
         assertThat(((TokenCountFieldMapper) stage1.mappers().smartNameFieldMapper("tc")).analyzer(), equalTo("standard"));
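Several mapper tests above move from DocumentMapper.MergeFlags to a plain boolean simulate flag, with conflicts now read through MergeResult#buildConflicts(). A minimal sketch of the two-phase pattern the tests exercise:

    // Sketch only: dry-run the merge first, then apply it if it is conflict-free.
    MergeResult mergeResult = stage1.merge(stage2.mapping(), true);   // simulate
    if (mergeResult.hasConflicts() == false) {
        stage1.merge(stage2.mapping(), false);                        // apply
    } else {
        String[] conflicts = mergeResult.buildConflicts();
    }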
diff --git a/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java
index 3990ff86df3..b4ecfeeb220 100644
--- a/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java
+++ b/src/test/java/org/elasticsearch/index/mapper/date/SimpleDateMappingTests.java
@@ -26,7 +26,6 @@ import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.QueryWrapperFilter;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.LocaleUtils;
@@ -38,6 +37,7 @@ import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.ParseContext;
 import org.elasticsearch.index.mapper.ParseContext.Document;
 import org.elasticsearch.index.mapper.ParsedDocument;
@@ -109,7 +109,7 @@ public class SimpleDateMappingTests extends ElasticsearchSingleNodeTest {
         try {
             LocaleUtils.parse("de_DE_DE_DE");
             fail();
-        } catch(ElasticsearchIllegalArgumentException ex) {
+        } catch(IllegalArgumentException ex) {
             // expected
         }
         assertThat(Locale.ROOT, equalTo(LocaleUtils.parse("")));
@@ -365,9 +365,9 @@ public class SimpleDateMappingTests extends ElasticsearchSingleNodeTest {
         Map<String, String> config = getConfigurationViaXContent(initialDateFieldMapper);
         assertThat(config.get("format"), is("EEE MMM dd HH:mm:ss.S Z yyyy||EEE MMM dd HH:mm:ss.SSS Z yyyy"));

-        DocumentMapper.MergeResult mergeResult = defaultMapper.merge(mergeMapper.mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false));
+        MergeResult mergeResult = defaultMapper.merge(mergeMapper.mapping(), false);

-        assertThat("Merging resulting in conflicts: " + Arrays.asList(mergeResult.conflicts()), mergeResult.hasConflicts(), is(false));
+        assertThat("Merging resulting in conflicts: " + Arrays.asList(mergeResult.buildConflicts()), mergeResult.hasConflicts(), is(false));
         assertThat(defaultMapper.mappers().getMapper("field"), is(instanceOf(DateFieldMapper.class)));

         DateFieldMapper mergedFieldMapper = (DateFieldMapper) defaultMapper.mappers().getMapper("field");
@@ -432,4 +432,4 @@ public class SimpleDateMappingTests extends ElasticsearchSingleNodeTest {
                 .bytes());
         assertThat(getDateAsMillis(doc.rootDoc(), "date_field"), equalTo(43000L));
     }
-}
\ No newline at end of file
+}
diff --git a/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java b/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java
index 8df9f1e7d2a..dc438fc4c06 100755
--- a/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java
+++ b/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalMapper.java
@@ -33,7 +33,7 @@ import org.elasticsearch.index.mapper.FieldMapper;
 import org.elasticsearch.index.mapper.FieldMapperListener;
 import org.elasticsearch.index.mapper.Mapper;
 import org.elasticsearch.index.mapper.MapperParsingException;
-import org.elasticsearch.index.mapper.MergeContext;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.MergeMappingException;
 import org.elasticsearch.index.mapper.ObjectMapperListener;
 import org.elasticsearch.index.mapper.ParseContext;
@@ -111,7 +111,7 @@ public class ExternalMapper extends AbstractFieldMapper {
             BooleanFieldMapper boolMapper = boolBuilder.build(context);
             GeoPointFieldMapper pointMapper = pointBuilder.build(context);
             GeoShapeFieldMapper shapeMapper = shapeBuilder.build(context);
-            Mapper stringMapper = stringBuilder.build(context);
+            FieldMapper stringMapper = (FieldMapper)stringBuilder.build(context);
             context.path().remove();

             context.path().pathType(origPathType);
@@ -157,12 +157,12 @@ public class ExternalMapper extends AbstractFieldMapper {
     private final BooleanFieldMapper boolMapper;
     private final GeoPointFieldMapper pointMapper;
     private final GeoShapeFieldMapper shapeMapper;
-    private final Mapper stringMapper;
+    private final FieldMapper stringMapper;

     public ExternalMapper(FieldMapper.Names names, String generatedValue, String mapperName,
                           BinaryFieldMapper binMapper, BooleanFieldMapper boolMapper, GeoPointFieldMapper pointMapper,
-                          GeoShapeFieldMapper shapeMapper, Mapper stringMapper, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
+                          GeoShapeFieldMapper shapeMapper, FieldMapper stringMapper, Settings indexSettings, MultiFields multiFields, CopyTo copyTo) {
         super(names, 1.0f, Defaults.FIELD_TYPE, false, null, null, null, null, null, indexSettings, multiFields, copyTo);

         this.generatedValue = generatedValue;
@@ -207,9 +207,6 @@ public class ExternalMapper extends AbstractFieldMapper {
         stringMapper.parse(context);

         multiFields.parse(this, context);
-        if (copyTo != null) {
-            copyTo.parse(context);
-        }

         return null;
     }
@@ -219,7 +216,7 @@ public class ExternalMapper extends AbstractFieldMapper {
     }

     @Override
-    public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
         // ignore this for now
     }

diff --git a/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalRootMapper.java b/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalRootMapper.java
index 4ec787accb8..16b6fe54cb0 100644
--- a/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalRootMapper.java
+++ b/src/test/java/org/elasticsearch/index/mapper/externalvalues/ExternalRootMapper.java
@@ -39,14 +39,9 @@ public class ExternalRootMapper implements RootMapper {
     }

     @Override
-    public Mapper parse(ParseContext context) throws IOException {
-        return null;
-    }
-
-    @Override
-    public void merge(Mapper mergeWith, MergeContext mergeContext) throws MergeMappingException {
+    public void merge(Mapper mergeWith, MergeResult mergeResult) throws MergeMappingException {
         if (!(mergeWith instanceof ExternalRootMapper)) {
-            mergeContext.addConflict("Trying to merge " + mergeWith + " with " + this);
+            mergeResult.addConflict("Trying to merge " + mergeWith + " with " + this);
         }
     }

diff --git a/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java
index e4f12589dc5..084c6b7d3cd 100644
--- a/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java
+++ b/src/test/java/org/elasticsearch/index/mapper/geo/GeoPointFieldMapperTests.java
@@ -23,6 +23,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.index.mapper.DocumentMapper;
 import org.elasticsearch.index.mapper.DocumentMapperParser;
 import org.elasticsearch.index.mapper.MapperParsingException;
+import org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.ParsedDocument;
 import org.elasticsearch.test.ElasticsearchSingleNodeTest;
 import org.junit.Test;
@@ -30,7 +31,6 @@ import org.junit.Test;
 import java.util.ArrayList;
 import java.util.Arrays;

-import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags;
 import static org.hamcrest.Matchers.*;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.nullValue;
@@ -486,11 +486,11 @@ public class GeoPointFieldMapperTests extends ElasticsearchSingleNodeTest {
                 .endObject().endObject().string();
         DocumentMapper stage2 = parser.parse(stage2Mapping);

-        DocumentMapper.MergeResult mergeResult = stage1.merge(stage2.mapping(), mergeFlags().simulate(false));
+        MergeResult mergeResult = stage1.merge(stage2.mapping(), false);
         assertThat(mergeResult.hasConflicts(), equalTo(true));
-        assertThat(mergeResult.conflicts().length, equalTo(2));
+        assertThat(mergeResult.buildConflicts().length, equalTo(2));
conflict? - assertThat("mapper [point] has different validate_lat", isIn(new ArrayList<>(Arrays.asList(mergeResult.conflicts())))); + assertThat("mapper [point] has different validate_lat", isIn(new ArrayList<>(Arrays.asList(mergeResult.buildConflicts())))); // correct mapping and ensure no failures stage2Mapping = XContentFactory.jsonBuilder().startObject().startObject("type") @@ -498,7 +498,7 @@ public class GeoPointFieldMapperTests extends ElasticsearchSingleNodeTest { .field("validate", true).field("normalize", true).endObject().endObject() .endObject().endObject().string(); stage2 = parser.parse(stage2Mapping); - mergeResult = stage1.merge(stage2.mapping(), mergeFlags().simulate(false)); + mergeResult = stage1.merge(stage2.mapping(), false); assertThat(mergeResult.hasConflicts(), equalTo(false)); } } diff --git a/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java index f0aad36239b..b959bb41ab6 100644 --- a/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/geo/GeoShapeFieldMapperTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.test.ElasticsearchSingleNodeTest; import org.junit.Test; @@ -35,7 +36,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; -import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.isIn; @@ -337,11 +337,11 @@ public class GeoShapeFieldMapperTests extends ElasticsearchSingleNodeTest { .field("orientation", "cw").endObject().endObject().endObject().endObject().string(); DocumentMapper stage2 = parser.parse(stage2Mapping); - DocumentMapper.MergeResult mergeResult = stage1.merge(stage2.mapping(), mergeFlags().simulate(false)); + MergeResult mergeResult = stage1.merge(stage2.mapping(), false); // check correct conflicts assertThat(mergeResult.hasConflicts(), equalTo(true)); - assertThat(mergeResult.conflicts().length, equalTo(3)); - ArrayList conflicts = new ArrayList<>(Arrays.asList(mergeResult.conflicts())); + assertThat(mergeResult.buildConflicts().length, equalTo(3)); + ArrayList conflicts = new ArrayList<>(Arrays.asList(mergeResult.buildConflicts())); assertThat("mapper [shape] has different strategy", isIn(conflicts)); assertThat("mapper [shape] has different tree", isIn(conflicts)); assertThat("mapper [shape] has different tree_levels or precision", isIn(conflicts)); @@ -364,7 +364,7 @@ public class GeoShapeFieldMapperTests extends ElasticsearchSingleNodeTest { .startObject("properties").startObject("shape").field("type", "geo_shape").field("precision", "1m") .field("distance_error_pct", 0.001).field("orientation", "cw").endObject().endObject().endObject().endObject().string(); stage2 = parser.parse(stage2Mapping); - mergeResult = stage1.merge(stage2.mapping(), mergeFlags().simulate(false)); + mergeResult = stage1.merge(stage2.mapping(), false); // verify mapping changes, and ensure no failures assertThat(mergeResult.hasConflicts(), equalTo(false)); diff --git 
a/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java index b18c678bf00..baa77ed7616 100644 --- a/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/index/IndexTypeMapperTests.java @@ -102,7 +102,7 @@ public class IndexTypeMapperTests extends ElasticsearchSingleNodeTest { .endObject().endObject().string(); DocumentMapper mapperDisabled = parser.parse(mappingWithIndexDisabled); - mapperEnabled.merge(mapperDisabled.mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + mapperEnabled.merge(mapperDisabled.mapping(), false); assertThat(mapperEnabled.IndexFieldMapper().enabled(), is(false)); } @@ -118,7 +118,7 @@ public class IndexTypeMapperTests extends ElasticsearchSingleNodeTest { .endObject().endObject().string(); DocumentMapper disabledMapper = parser.parse(disabledMapping); - enabledMapper.merge(disabledMapper.mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + enabledMapper.merge(disabledMapper.mapping(), false); assertThat(enabledMapper.indexMapper().enabled(), is(false)); } diff --git a/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java index ef8f0c1d259..de606371e1e 100644 --- a/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/internal/FieldNamesFieldMapperTests.java @@ -19,6 +19,7 @@ package org.elasticsearch.index.mapper.internal; +import org.apache.lucene.index.IndexOptions; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.ImmutableSettings; @@ -62,6 +63,20 @@ public class FieldNamesFieldMapperTests extends ElasticsearchSingleNodeTest { assertEquals(set("", ".", ".."), extract("..")); } + public void testFieldType() throws Exception { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("_field_names").endObject() + .endObject().endObject().string(); + + DocumentMapper docMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping); + FieldNamesFieldMapper fieldNamesMapper = docMapper.rootMapper(FieldNamesFieldMapper.class); + assertFalse(fieldNamesMapper.hasDocValues()); + assertEquals(IndexOptions.DOCS, fieldNamesMapper.fieldType().indexOptions()); + assertFalse(fieldNamesMapper.fieldType().tokenized()); + assertFalse(fieldNamesMapper.fieldType().stored()); + assertTrue(fieldNamesMapper.fieldType().omitNorms()); + } + public void testInjectIntoDocDuringParsing() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type").endObject().endObject().string(); DocumentMapper defaultMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping); @@ -162,11 +177,11 @@ public class FieldNamesFieldMapperTests extends ElasticsearchSingleNodeTest { DocumentMapper mapperEnabled = parser.parse(enabledMapping); DocumentMapper mapperDisabled = parser.parse(disabledMapping); - mapperEnabled.merge(mapperDisabled.mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + mapperEnabled.merge(mapperDisabled.mapping(), false); assertFalse(mapperEnabled.rootMapper(FieldNamesFieldMapper.class).enabled()); mapperEnabled = parser.parse(enabledMapping); - 
mapperDisabled.merge(mapperEnabled.mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + mapperDisabled.merge(mapperEnabled.mapping(), false); assertTrue(mapperEnabled.rootMapper(FieldNamesFieldMapper.class).enabled()); } } diff --git a/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java index 94af55f7f3d..8923d95f405 100644 --- a/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/ip/SimpleIpMappingTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.index.mapper.ip; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; @@ -68,7 +67,7 @@ public class SimpleIpMappingTests extends ElasticsearchSingleNodeTest { try { IpFieldMapper.ipToLong("127.0.011.1111111"); fail("Expected ip address parsing to fail but did not happen"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("not a valid ip address")); } } @@ -78,7 +77,7 @@ public class SimpleIpMappingTests extends ElasticsearchSingleNodeTest { try { IpFieldMapper.ipToLong("2001:db8:0:8d3:0:8a2e:70:7344"); fail("Expected ip address parsing to fail but did not happen"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("not a valid ipv4 address")); } } @@ -100,14 +99,14 @@ public class SimpleIpMappingTests extends ElasticsearchSingleNodeTest { try { defaultMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject().field("field2", "").endObject().bytes()); } catch (MapperParsingException e) { - assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); } // Verify that the default is false try { defaultMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject().field("field3", "").endObject().bytes()); } catch (MapperParsingException e) { - assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); } // Unless the global ignore_malformed option is set to true @@ -120,7 +119,7 @@ public class SimpleIpMappingTests extends ElasticsearchSingleNodeTest { try { defaultMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject().field("field2", "").endObject().bytes()); } catch (MapperParsingException e) { - assertThat(e.getCause(), instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e.getCause(), instanceOf(IllegalArgumentException.class)); } } diff --git a/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java b/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java index ad3556f7873..7a73da835fd 100644 --- a/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/merge/TestMergeMapperTests.java @@ -23,12 +23,12 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; +import 
org.elasticsearch.index.mapper.MergeResult;
 import org.elasticsearch.index.mapper.core.StringFieldMapper;
 import org.elasticsearch.index.mapper.object.ObjectMapper;
 import org.elasticsearch.test.ElasticsearchSingleNodeTest;
 import org.junit.Test;
 
-import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags;
 import static org.hamcrest.Matchers.*;
 
 /**
@@ -51,13 +51,13 @@ public class TestMergeMapperTests extends ElasticsearchSingleNodeTest {
                 .endObject().endObject().endObject().string();
         DocumentMapper stage2 = parser.parse(stage2Mapping);
 
-        DocumentMapper.MergeResult mergeResult = stage1.merge(stage2.mapping(), mergeFlags().simulate(true));
+        MergeResult mergeResult = stage1.merge(stage2.mapping(), true);
         assertThat(mergeResult.hasConflicts(), equalTo(false));
         // since we are simulating, we should not have the age mapping
         assertThat(stage1.mappers().smartNameFieldMapper("age"), nullValue());
         assertThat(stage1.mappers().smartNameFieldMapper("obj1.prop1"), nullValue());
         // now merge, don't simulate
-        mergeResult = stage1.merge(stage2.mapping(), mergeFlags().simulate(false));
+        mergeResult = stage1.merge(stage2.mapping(), false);
         // there are still no merge failures
         assertThat(mergeResult.hasConflicts(), equalTo(false));
         // but we have the age in
@@ -70,13 +70,13 @@ public class TestMergeMapperTests extends ElasticsearchSingleNodeTest {
         DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser();
         String objectMapping = XContentFactory.jsonBuilder().startObject().startObject("type1").endObject().endObject().string();
         DocumentMapper mapper = parser.parse(objectMapping);
-        assertThat(mapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.TRUE));
+        assertNull(mapper.root().dynamic());
         String withDynamicMapping = XContentFactory.jsonBuilder().startObject().startObject("type1").field("dynamic", "false").endObject().endObject().string();
         DocumentMapper withDynamicMapper = parser.parse(withDynamicMapping);
         assertThat(withDynamicMapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE));
-        DocumentMapper.MergeResult mergeResult = mapper.merge(withDynamicMapper.mapping(), mergeFlags().simulate(false));
+        MergeResult mergeResult = mapper.merge(withDynamicMapper.mapping(), false);
         assertThat(mergeResult.hasConflicts(), equalTo(false));
         assertThat(mapper.root().dynamic(), equalTo(ObjectMapper.Dynamic.FALSE));
     }
@@ -93,14 +93,14 @@ public class TestMergeMapperTests extends ElasticsearchSingleNodeTest {
                 .endObject().endObject().endObject().string();
         DocumentMapper nestedMapper = parser.parse(nestedMapping);
 
-        DocumentMapper.MergeResult mergeResult = objectMapper.merge(nestedMapper.mapping(), mergeFlags().simulate(true));
+        MergeResult mergeResult = objectMapper.merge(nestedMapper.mapping(), true);
         assertThat(mergeResult.hasConflicts(), equalTo(true));
-        assertThat(mergeResult.conflicts().length, equalTo(1));
-        assertThat(mergeResult.conflicts()[0], equalTo("object mapping [obj] can't be changed from non-nested to nested"));
+        assertThat(mergeResult.buildConflicts().length, equalTo(1));
+        assertThat(mergeResult.buildConflicts()[0], equalTo("object mapping [obj] can't be changed from non-nested to nested"));
 
-        mergeResult = nestedMapper.merge(objectMapper.mapping(), mergeFlags().simulate(true));
-        assertThat(mergeResult.conflicts().length, equalTo(1));
-        assertThat(mergeResult.conflicts()[0], equalTo("object mapping [obj] can't be changed from nested to non-nested"));
+        mergeResult = nestedMapper.merge(objectMapper.mapping(), true);
+
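Alongside the signature change, conflict inspection moves from conflicts() to buildConflicts(), which builds the accumulated conflict messages into a String[]. A small illustrative helper (not part of the patch) capturing the idiom these assertions use:

import java.util.Arrays;
import org.elasticsearch.index.mapper.MergeResult;

class MergeConflictsSketch {
    // buildConflicts() replaces the old MergeResult.conflicts() accessor.
    static void assertNoConflicts(MergeResult mergeResult) {
        if (mergeResult.hasConflicts()) {
            throw new AssertionError("merge produced conflicts: " + Arrays.toString(mergeResult.buildConflicts()));
        }
    }
}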
assertThat(mergeResult.buildConflicts().length, equalTo(1)); + assertThat(mergeResult.buildConflicts()[0], equalTo("object mapping [obj] can't be changed from nested to non-nested")); } @Test @@ -117,7 +117,7 @@ public class TestMergeMapperTests extends ElasticsearchSingleNodeTest { DocumentMapper changed = parser.parse(mapping2); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").searchAnalyzer()).name(), equalTo("whitespace")); - DocumentMapper.MergeResult mergeResult = existing.merge(changed.mapping(), mergeFlags().simulate(false)); + MergeResult mergeResult = existing.merge(changed.mapping(), false); assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").searchAnalyzer()).name(), equalTo("keyword")); @@ -137,7 +137,7 @@ public class TestMergeMapperTests extends ElasticsearchSingleNodeTest { DocumentMapper changed = parser.parse(mapping2); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").searchAnalyzer()).name(), equalTo("whitespace")); - DocumentMapper.MergeResult mergeResult = existing.merge(changed.mapping(), mergeFlags().simulate(false)); + MergeResult mergeResult = existing.merge(changed.mapping(), false); assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(((NamedAnalyzer) existing.mappers().getMapper("field").searchAnalyzer()).name(), equalTo("standard")); diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java b/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java index 8a54985f0ce..75c58a7bf8f 100644 --- a/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/multifield/MultiFieldTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; +import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.index.mapper.core.*; import org.elasticsearch.index.mapper.geo.GeoPointFieldMapper; @@ -481,4 +482,30 @@ public class MultiFieldTests extends ElasticsearchSingleNodeTest { DocumentMapper docMapper2 = parser.parse(docMapper.mappingSource().string()); assertThat(docMapper.mappingSource(), equalTo(docMapper2.mappingSource())); } + + public void testObjectFieldNotAllowed() throws Exception { + String mapping = jsonBuilder().startObject().startObject("type").startObject("properties").startObject("my_field") + .field("type", "string").startObject("fields").startObject("multi").field("type", "object").endObject().endObject() + .endObject().endObject().endObject().endObject().string(); + final DocumentMapperParser parser = createIndex("test").mapperService().documentMapperParser(); + try { + parser.parse(mapping); + fail("expected mapping parse failure"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage().contains("cannot be used in multi field")); + } + } + + public void testNestedFieldNotAllowed() throws Exception { + String mapping = jsonBuilder().startObject().startObject("type").startObject("properties").startObject("my_field") + .field("type", "string").startObject("fields").startObject("multi").field("type", "nested").endObject().endObject() + .endObject().endObject().endObject().endObject().string(); + final DocumentMapperParser parser = 
createIndex("test").mapperService().documentMapperParser(); + try { + parser.parse(mapping); + fail("expected mapping parse failure"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage().contains("cannot be used in multi field")); + } + } } diff --git a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java index 8cc6694013f..b68c49d7956 100644 --- a/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/multifield/merge/JavaMultiFieldMergeTests.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentMapperParser; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.ParseContext.Document; import org.elasticsearch.test.ElasticsearchSingleNodeTest; import org.junit.Test; @@ -33,7 +34,6 @@ import java.util.Arrays; import static org.elasticsearch.common.io.Streams.copyToBytesFromClasspath; import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath; -import static org.elasticsearch.index.mapper.DocumentMapper.MergeFlags.mergeFlags; import static org.hamcrest.Matchers.*; /** @@ -62,10 +62,10 @@ public class JavaMultiFieldMergeTests extends ElasticsearchSingleNodeTest { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping2.json"); DocumentMapper docMapper2 = parser.parse(mapping); - DocumentMapper.MergeResult mergeResult = docMapper.merge(docMapper2.mapping(), mergeFlags().simulate(true)); - assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false)); + MergeResult mergeResult = docMapper.merge(docMapper2.mapping(), true); + assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); - docMapper.merge(docMapper2.mapping(), mergeFlags().simulate(false)); + docMapper.merge(docMapper2.mapping(), false); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -85,10 +85,10 @@ public class JavaMultiFieldMergeTests extends ElasticsearchSingleNodeTest { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/test-mapping3.json"); DocumentMapper docMapper3 = parser.parse(mapping); - mergeResult = docMapper.merge(docMapper3.mapping(), mergeFlags().simulate(true)); - assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false)); + mergeResult = docMapper.merge(docMapper3.mapping(), true); + assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); - docMapper.merge(docMapper3.mapping(), mergeFlags().simulate(false)); + docMapper.merge(docMapper3.mapping(), false); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -103,10 +103,10 @@ public class JavaMultiFieldMergeTests extends ElasticsearchSingleNodeTest { DocumentMapper docMapper4 = parser.parse(mapping); - mergeResult = docMapper.merge(docMapper4.mapping(), mergeFlags().simulate(true)); - assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false)); + mergeResult = docMapper.merge(docMapper4.mapping(), true); + 
assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); - docMapper.merge(docMapper4.mapping(), mergeFlags().simulate(false)); + docMapper.merge(docMapper4.mapping(), false); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -138,10 +138,10 @@ public class JavaMultiFieldMergeTests extends ElasticsearchSingleNodeTest { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade1.json"); DocumentMapper docMapper2 = parser.parse(mapping); - DocumentMapper.MergeResult mergeResult = docMapper.merge(docMapper2.mapping(), mergeFlags().simulate(true)); - assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false)); + MergeResult mergeResult = docMapper.merge(docMapper2.mapping(), true); + assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); - docMapper.merge(docMapper2.mapping(), mergeFlags().simulate(false)); + docMapper.merge(docMapper2.mapping(), false); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -161,10 +161,10 @@ public class JavaMultiFieldMergeTests extends ElasticsearchSingleNodeTest { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade2.json"); DocumentMapper docMapper3 = parser.parse(mapping); - mergeResult = docMapper.merge(docMapper3.mapping(), mergeFlags().simulate(true)); - assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(false)); + mergeResult = docMapper.merge(docMapper3.mapping(), true); + assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(false)); - docMapper.merge(docMapper3.mapping(), mergeFlags().simulate(false)); + docMapper.merge(docMapper3.mapping(), false); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); @@ -177,17 +177,17 @@ public class JavaMultiFieldMergeTests extends ElasticsearchSingleNodeTest { mapping = copyToStringFromClasspath("/org/elasticsearch/index/mapper/multifield/merge/upgrade3.json"); DocumentMapper docMapper4 = parser.parse(mapping); - mergeResult = docMapper.merge(docMapper4.mapping(), mergeFlags().simulate(true)); - assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(true)); - assertThat(mergeResult.conflicts()[0], equalTo("mapper [name] has different index values")); - assertThat(mergeResult.conflicts()[1], equalTo("mapper [name] has different store values")); + mergeResult = docMapper.merge(docMapper4.mapping(), true); + assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(true)); + assertThat(mergeResult.buildConflicts()[0], equalTo("mapper [name] has different index values")); + assertThat(mergeResult.buildConflicts()[1], equalTo("mapper [name] has different store values")); - mergeResult = docMapper.merge(docMapper4.mapping(), mergeFlags().simulate(false)); - assertThat(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts(), equalTo(true)); + mergeResult = docMapper.merge(docMapper4.mapping(), false); + assertThat(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts(), equalTo(true)); assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); - assertThat(mergeResult.conflicts()[0], equalTo("mapper [name] has different index values")); - 
assertThat(mergeResult.conflicts()[1], equalTo("mapper [name] has different store values")); + assertThat(mergeResult.buildConflicts()[0], equalTo("mapper [name] has different index values")); + assertThat(mergeResult.buildConflicts()[1], equalTo("mapper [name] has different store values")); // There are conflicts, but the `name.not_indexed3` has been added, b/c that field has no conflicts assertNotSame(IndexOptions.NONE, docMapper.mappers().getMapper("name").fieldType().indexOptions()); diff --git a/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java index 2012b0d7713..4b560ae011b 100644 --- a/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/size/SizeMappingTests.java @@ -114,7 +114,7 @@ public class SizeMappingTests extends ElasticsearchSingleNodeTest { .endObject().endObject().string(); DocumentMapper disabledMapper = parser.parse(disabledMapping); - enabledMapper.merge(disabledMapper.mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + enabledMapper.merge(disabledMapper.mapping(), false); assertThat(enabledMapper.SizeFieldMapper().enabled(), is(false)); } } \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java index b8b0a91af38..fb50de2205d 100644 --- a/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/source/DefaultSourceMappingTests.java @@ -20,10 +20,13 @@ package org.elasticsearch.index.mapper.source; import org.apache.lucene.index.IndexableField; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.compress.CompressedString; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -38,12 +41,8 @@ import java.util.Map; import static org.hamcrest.Matchers.*; -/** - * - */ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest { - @Test public void testNoFormat() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_source").endObject() @@ -65,7 +64,6 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest { assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.SMILE)); } - @Test public void testJsonFormat() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_source").field("format", "json").endObject() @@ -87,7 +85,6 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest { assertThat(XContentFactory.xContentType(doc.source()), equalTo(XContentType.JSON)); } - @Test public void testJsonFormatCompressed() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("_source").field("format", "json").field("compress", true).endObject() @@ -113,18 +110,25 @@ public class DefaultSourceMappingTests extends 
ElasticsearchSingleNodeTest { assertThat(XContentFactory.xContentType(uncompressed), equalTo(XContentType.JSON)); } - @Test - public void testIncludeExclude() throws Exception { + public void testIncludesBackcompat() throws Exception { String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") - .startObject("_source").field("includes", new String[]{"path1*"}).endObject() - .endObject().endObject().string(); + .startObject("_source").field("includes", new String[]{"path1*"}).endObject() + .endObject().endObject().string(); - DocumentMapper documentMapper = createIndex("test").mapperService().documentMapperParser().parse(mapping); + try { + createIndex("testbad").mapperService().documentMapperParser().parse(mapping); + fail("includes should not be allowed"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage().contains("unsupported parameters")); + } + + Settings settings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); + DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject() - .startObject("path1").field("field1", "value1").endObject() - .startObject("path2").field("field2", "value2").endObject() - .endObject().bytes()); + .startObject("path1").field("field1", "value1").endObject() + .startObject("path2").field("field2", "value2").endObject() + .endObject().bytes()); IndexableField sourceField = doc.rootDoc().getField("_source"); Map sourceAsMap = XContentFactory.xContent(XContentType.JSON).createParser(new BytesArray(sourceField.binaryValue())).mapAndClose(); @@ -132,7 +136,32 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest { assertThat(sourceAsMap.containsKey("path2"), equalTo(false)); } - @Test + public void testExcludesBackcompat() throws Exception { + String mapping = XContentFactory.jsonBuilder().startObject().startObject("type") + .startObject("_source").field("excludes", new String[]{"path1*"}).endObject() + .endObject().endObject().string(); + + try { + createIndex("testbad").mapperService().documentMapperParser().parse(mapping); + fail("excludes should not be allowed"); + } catch (MapperParsingException e) { + assertTrue(e.getMessage().contains("unsupported parameters")); + } + + Settings settings = ImmutableSettings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id).build(); + DocumentMapper documentMapper = createIndex("test", settings).mapperService().documentMapperParser().parse(mapping); + + ParsedDocument doc = documentMapper.parse("type", "1", XContentFactory.jsonBuilder().startObject() + .startObject("path1").field("field1", "value1").endObject() + .startObject("path2").field("field2", "value2").endObject() + .endObject().bytes()); + + IndexableField sourceField = doc.rootDoc().getField("_source"); + Map sourceAsMap = XContentFactory.xContent(XContentType.JSON).createParser(new BytesArray(sourceField.binaryValue())).mapAndClose(); + assertThat(sourceAsMap.containsKey("path1"), equalTo(false)); + assertThat(sourceAsMap.containsKey("path2"), equalTo(true)); + } + public void testDefaultMappingAndNoMapping() throws Exception { String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING) .startObject("_source").field("enabled", false).endObject() @@ -161,7 +190,6 @@ public class DefaultSourceMappingTests extends 
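The two backcompat tests above follow the pattern this changeset uses for deprecated mapping parameters: parsing must fail on a freshly created index, but still succeed when the index reports an older creation version. The gate is nothing more than an index setting; a sketch using only identifiers that appear in the tests:

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.settings.Settings;

class BackcompatGateSketch {
    // An index whose SETTING_VERSION_CREATED points at an older release keeps
    // accepting legacy parameters such as _source includes/excludes.
    static Settings legacyIndexSettings() {
        return ImmutableSettings.builder()
                .put(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id)
                .build();
    }
}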
ElasticsearchSingleNodeTest { } } - @Test public void testDefaultMappingAndWithMappingOverride() throws Exception { String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING) .startObject("_source").field("enabled", false).endObject() @@ -176,7 +204,6 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest { assertThat(mapper.sourceMapper().enabled(), equalTo(true)); } - @Test public void testDefaultMappingAndNoMappingWithMapperService() throws Exception { String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING) .startObject("_source").field("enabled", false).endObject() @@ -190,7 +217,6 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest { assertThat(mapper.sourceMapper().enabled(), equalTo(false)); } - @Test public void testDefaultMappingAndWithMappingOverrideWithMapperService() throws Exception { String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING) .startObject("_source").field("enabled", false).endObject() @@ -208,66 +234,4 @@ public class DefaultSourceMappingTests extends ElasticsearchSingleNodeTest { assertThat(mapper.type(), equalTo("my_type")); assertThat(mapper.sourceMapper().enabled(), equalTo(true)); } - - @Test - public void testParsingWithDefaultAppliedAndNotApplied() throws Exception { - String defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING) - .startObject("_source").array("includes", "default_field_path.").endObject() - .endObject().endObject().string(); - - MapperService mapperService = createIndex("test").mapperService(); - mapperService.merge(MapperService.DEFAULT_MAPPING, new CompressedString(defaultMapping), true); - - String mapping = XContentFactory.jsonBuilder().startObject().startObject("my_type") - .startObject("_source").array("includes", "custom_field_path.").endObject() - .endObject().endObject().string(); - mapperService.merge("my_type", new CompressedString(mapping), true); - DocumentMapper mapper = mapperService.documentMapper("my_type"); - assertThat(mapper.type(), equalTo("my_type")); - assertThat(mapper.sourceMapper().includes().length, equalTo(2)); - assertThat(mapper.sourceMapper().includes(), hasItemInArray("default_field_path.")); - assertThat(mapper.sourceMapper().includes(), hasItemInArray("custom_field_path.")); - - mapping = XContentFactory.jsonBuilder().startObject().startObject("my_type") - .startObject("properties").startObject("text").field("type", "string").endObject().endObject() - .endObject().endObject().string(); - mapperService.merge("my_type", new CompressedString(mapping), false); - mapper = mapperService.documentMapper("my_type"); - assertThat(mapper.type(), equalTo("my_type")); - assertThat(mapper.sourceMapper().includes(), hasItemInArray("default_field_path.")); - assertThat(mapper.sourceMapper().includes(), hasItemInArray("custom_field_path.")); - assertThat(mapper.sourceMapper().includes().length, equalTo(2)); - } - - public void testDefaultNotAppliedOnUpdate() throws Exception { - XContentBuilder defaultMapping = XContentFactory.jsonBuilder().startObject().startObject(MapperService.DEFAULT_MAPPING) - .startObject("_source").array("includes", "default_field_path.").endObject() - .endObject().endObject(); - - IndexService indexService = createIndex("test", ImmutableSettings.EMPTY, MapperService.DEFAULT_MAPPING, defaultMapping); - - String mapping = 
XContentFactory.jsonBuilder().startObject().startObject("my_type") - .startObject("_source").array("includes", "custom_field_path.").endObject() - .endObject().endObject().string(); - client().admin().indices().preparePutMapping("test").setType("my_type").setSource(mapping).get(); - - DocumentMapper mapper = indexService.mapperService().documentMapper("my_type"); - assertThat(mapper.type(), equalTo("my_type")); - assertThat(mapper.sourceMapper().includes().length, equalTo(2)); - List includes = Arrays.asList(mapper.sourceMapper().includes()); - assertThat("default_field_path.", isIn(includes)); - assertThat("custom_field_path.", isIn(includes)); - - mapping = XContentFactory.jsonBuilder().startObject().startObject("my_type") - .startObject("properties").startObject("text").field("type", "string").endObject().endObject() - .endObject().endObject().string(); - client().admin().indices().preparePutMapping("test").setType("my_type").setSource(mapping).get(); - - mapper = indexService.mapperService().documentMapper("my_type"); - assertThat(mapper.type(), equalTo("my_type")); - includes = Arrays.asList(mapper.sourceMapper().includes()); - assertThat("default_field_path.", isIn(includes)); - assertThat("custom_field_path.", isIn(includes)); - assertThat(mapper.sourceMapper().includes().length, equalTo(2)); - } } diff --git a/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java index 57c98584442..f9547fbefe6 100644 --- a/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/string/SimpleStringMappingTests.java @@ -41,8 +41,7 @@ import org.elasticsearch.index.IndexService; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.ContentPath; import org.elasticsearch.index.mapper.DocumentMapper; -import org.elasticsearch.index.mapper.DocumentMapper.MergeFlags; -import org.elasticsearch.index.mapper.DocumentMapper.MergeResult; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.index.mapper.DocumentMapperParser; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.Mapper.BuilderContext; @@ -500,8 +499,8 @@ public class SimpleStringMappingTests extends ElasticsearchSingleNodeTest { String updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field").field("type", "string").startObject("norms").field("enabled", false).endObject() .endObject().endObject().endObject().endObject().string(); - MergeResult mergeResult = defaultMapper.merge(parser.parse(updatedMapping).mapping(), MergeFlags.mergeFlags().simulate(false)); - assertFalse(Arrays.toString(mergeResult.conflicts()), mergeResult.hasConflicts()); + MergeResult mergeResult = defaultMapper.merge(parser.parse(updatedMapping).mapping(), false); + assertFalse(Arrays.toString(mergeResult.buildConflicts()), mergeResult.hasConflicts()); doc = defaultMapper.parse("type", "1", XContentFactory.jsonBuilder() .startObject() @@ -515,10 +514,10 @@ public class SimpleStringMappingTests extends ElasticsearchSingleNodeTest { updatedMapping = XContentFactory.jsonBuilder().startObject().startObject("type") .startObject("properties").startObject("field").field("type", "string").startObject("norms").field("enabled", true).endObject() .endObject().endObject().endObject().endObject().string(); - mergeResult = 
defaultMapper.merge(parser.parse(updatedMapping).mapping(), MergeFlags.mergeFlags()); + mergeResult = defaultMapper.merge(parser.parse(updatedMapping).mapping(), true); assertTrue(mergeResult.hasConflicts()); - assertEquals(1, mergeResult.conflicts().length); - assertTrue(mergeResult.conflicts()[0].contains("cannot enable norms")); + assertEquals(1, mergeResult.buildConflicts().length); + assertTrue(mergeResult.buildConflicts()[0].contains("cannot enable norms")); } public void testTermsFilter() throws Exception { diff --git a/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java index 08d8af1afa4..c97fa5b789d 100644 --- a/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/timestamp/TimestampMappingTests.java @@ -141,7 +141,7 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { .endObject().endObject().string(); DocumentMapper disabledMapper = parser.parse(disabledMapping); - enabledMapper.merge(disabledMapper.mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + enabledMapper.merge(disabledMapper.mapping(), false); assertThat(enabledMapper.timestampFieldMapper().enabled(), is(false)); } @@ -443,11 +443,11 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false); BytesStreamOutput out = new BytesStreamOutput(); - MappingMetaData.writeTo(expected, out); + expected.writeTo(out); out.close(); BytesReference bytes = out.bytes(); - MappingMetaData metaData = MappingMetaData.readFrom(new BytesStreamInput(bytes)); + MappingMetaData metaData = MappingMetaData.PROTO.readFrom(new BytesStreamInput(bytes)); assertThat(metaData, is(expected)); } @@ -460,11 +460,11 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false); BytesStreamOutput out = new BytesStreamOutput(); - MappingMetaData.writeTo(expected, out); + expected.writeTo(out); out.close(); BytesReference bytes = out.bytes(); - MappingMetaData metaData = MappingMetaData.readFrom(new BytesStreamInput(bytes)); + MappingMetaData metaData = MappingMetaData.PROTO.readFrom(new BytesStreamInput(bytes)); assertThat(metaData, is(expected)); } @@ -477,11 +477,11 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { new MappingMetaData.Id(null), new MappingMetaData.Routing(false, null), timestamp, false); BytesStreamOutput out = new BytesStreamOutput(); - MappingMetaData.writeTo(expected, out); + expected.writeTo(out); out.close(); BytesReference bytes = out.bytes(); - MappingMetaData metaData = MappingMetaData.readFrom(new BytesStreamInput(bytes)); + MappingMetaData metaData = MappingMetaData.PROTO.readFrom(new BytesStreamInput(bytes)); assertThat(metaData, is(expected)); } @@ -502,8 +502,8 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { .startObject("_timestamp").field("enabled", randomBoolean()).startObject("fielddata").field("loading", "eager").field("format", "array").endObject().field("store", "yes").endObject() .endObject().endObject().string(); - DocumentMapper.MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); - assertThat(mergeResult.conflicts().length, equalTo(0)); + 
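In the serialization round-trips above, the static MappingMetaData.writeTo(expected, out) helper becomes an instance-side writeTo(out), and reading now goes through a shared PROTO prototype instead of a static readFrom. A round-trip sketch under those assumptions, using the stream types the tests themselves use:

import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamInput;
import org.elasticsearch.common.io.stream.BytesStreamOutput;

class MappingMetaDataRoundTripSketch {
    static MappingMetaData roundTrip(MappingMetaData expected) throws Exception {
        BytesStreamOutput out = new BytesStreamOutput();
        expected.writeTo(out);   // instance method; was the static MappingMetaData.writeTo(expected, out)
        out.close();
        BytesReference bytes = out.bytes();
        // PROTO is the prototype instance this changeset introduces for deserialization.
        return MappingMetaData.PROTO.readFrom(new BytesStreamInput(bytes));
    }
}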
MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), false); + assertThat(mergeResult.buildConflicts().length, equalTo(0)); assertThat(docMapper.timestampFieldMapper().fieldDataType().getLoading(), equalTo(FieldMapper.Loading.EAGER)); assertThat(docMapper.timestampFieldMapper().fieldDataType().getFormat(indexSettings), equalTo("array")); } @@ -576,13 +576,13 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { .endObject() .endObject().endObject().string(); - DocumentMapper.MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(true)); + MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), true); String[] expectedConflicts = {"mapper [_timestamp] has different index values", "mapper [_timestamp] has different store values", "Cannot update default in _timestamp value. Value is 1970-01-01 now encountering 1970-01-02", "Cannot update path in _timestamp value. Value is foo path in merged mapping is bar", "mapper [_timestamp] has different tokenize values"}; - for (String conflict : mergeResult.conflicts()) { + for (String conflict : mergeResult.buildConflicts()) { assertThat(conflict, isIn(expectedConflicts)); } - assertThat(mergeResult.conflicts().length, equalTo(expectedConflicts.length)); + assertThat(mergeResult.buildConflicts().length, equalTo(expectedConflicts.length)); assertThat(docMapper.timestampFieldMapper().fieldDataType().getLoading(), equalTo(FieldMapper.Loading.LAZY)); assertTrue(docMapper.timestampFieldMapper().enabled()); assertThat(docMapper.timestampFieldMapper().fieldDataType().getFormat(indexSettings), equalTo("doc_values")); @@ -610,7 +610,7 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { .endObject() .endObject().endObject().string(); - DocumentMapper.MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(true)); + MergeResult mergeResult = docMapper.merge(parser.parse(mapping).mapping(), true); List expectedConflicts = new ArrayList<>(); expectedConflicts.add("mapper [_timestamp] has different index values"); expectedConflicts.add("mapper [_timestamp] has different tokenize values"); @@ -620,7 +620,7 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { expectedConflicts.add("mapper [_timestamp] has different doc_values values"); } - for (String conflict : mergeResult.conflicts()) { + for (String conflict : mergeResult.buildConflicts()) { assertThat(conflict, isIn(expectedConflicts)); } } @@ -671,10 +671,10 @@ public class TimestampMappingTests extends ElasticsearchSingleNodeTest { DocumentMapper docMapper = parser.parse(mapping1); docMapper.refreshSource(); docMapper = parser.parse(docMapper.mappingSource().string()); - DocumentMapper.MergeResult mergeResult = docMapper.merge(parser.parse(mapping2).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(true)); - assertThat(mergeResult.conflicts().length, equalTo(conflict == null ? 0:1)); + MergeResult mergeResult = docMapper.merge(parser.parse(mapping2).mapping(), true); + assertThat(mergeResult.buildConflicts().length, equalTo(conflict == null ? 
0:1)); if (conflict != null) { - assertThat(mergeResult.conflicts()[0], containsString(conflict)); + assertThat(mergeResult.buildConflicts()[0], containsString(conflict)); } } diff --git a/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java index 2c9868b4ced..92b9ba83859 100644 --- a/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/ttl/TTLMappingTests.java @@ -116,8 +116,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { DocumentMapper mapperWithoutTtl = parser.parse(mappingWithoutTtl); DocumentMapper mapperWithTtl = parser.parse(mappingWithTtl); - DocumentMapper.MergeFlags mergeFlags = DocumentMapper.MergeFlags.mergeFlags().simulate(false); - DocumentMapper.MergeResult mergeResult = mapperWithoutTtl.merge(mapperWithTtl.mapping(), mergeFlags); + MergeResult mergeResult = mapperWithoutTtl.merge(mapperWithTtl.mapping(), false); assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(mapperWithoutTtl.TTLFieldMapper().enabled(), equalTo(true)); @@ -143,8 +142,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { DocumentMapper initialMapper = parser.parse(mappingWithTtl); DocumentMapper updatedMapper = parser.parse(updatedMapping); - DocumentMapper.MergeFlags mergeFlags = DocumentMapper.MergeFlags.mergeFlags().simulate(false); - DocumentMapper.MergeResult mergeResult = initialMapper.merge(updatedMapper.mapping(), mergeFlags); + MergeResult mergeResult = initialMapper.merge(updatedMapper.mapping(), true); assertThat(mergeResult.hasConflicts(), equalTo(false)); assertThat(initialMapper.TTLFieldMapper().enabled(), equalTo(true)); @@ -158,8 +156,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { DocumentMapper initialMapper = parser.parse(mappingWithTtl); DocumentMapper updatedMapper = parser.parse(mappingWithTtlDisabled); - DocumentMapper.MergeFlags mergeFlags = DocumentMapper.MergeFlags.mergeFlags().simulate(true); - DocumentMapper.MergeResult mergeResult = initialMapper.merge(updatedMapper.mapping(), mergeFlags); + MergeResult mergeResult = initialMapper.merge(updatedMapper.mapping(), true); assertThat(mergeResult.hasConflicts(), equalTo(true)); assertThat(initialMapper.TTLFieldMapper().enabled(), equalTo(true)); @@ -197,7 +194,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { public void testNoConflictIfNothingSetAndDisabledLater() throws Exception { IndexService indexService = createIndex("testindex", ImmutableSettings.settingsBuilder().build(), "type"); XContentBuilder mappingWithTtlDisabled = getMappingWithTtlDisabled("7d"); - DocumentMapper.MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlDisabled.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(randomBoolean())); + MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlDisabled.string()), true).mapping(), randomBoolean()); assertFalse(mergeResult.hasConflicts()); } @@ -205,7 +202,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { public void testNoConflictIfNothingSetAndEnabledLater() throws Exception { IndexService indexService = createIndex("testindex", ImmutableSettings.settingsBuilder().build(), "type"); XContentBuilder 
mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); - DocumentMapper.MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(randomBoolean())); + MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), randomBoolean()); assertFalse(mergeResult.hasConflicts()); } @@ -214,7 +211,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); IndexService indexService = createIndex("testindex", ImmutableSettings.settingsBuilder().build(), "type", mappingWithTtlEnabled); XContentBuilder mappingWithOnlyDefaultSet = getMappingWithOnlyTtlDefaultSet("6m"); - DocumentMapper.MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithOnlyDefaultSet.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithOnlyDefaultSet.string()), true).mapping(), false); assertFalse(mergeResult.hasConflicts()); CompressedString mappingAfterMerge = indexService.mapperService().documentMapper("type").refreshSource(); assertThat(mappingAfterMerge, equalTo(new CompressedString("{\"type\":{\"_ttl\":{\"enabled\":true,\"default\":360000},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); @@ -227,7 +224,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { CompressedString mappingAfterCreation = indexService.mapperService().documentMapper("type").refreshSource(); assertThat(mappingAfterCreation, equalTo(new CompressedString("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); XContentBuilder mappingWithOnlyDefaultSet = getMappingWithOnlyTtlDefaultSet("6m"); - DocumentMapper.MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithOnlyDefaultSet.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithOnlyDefaultSet.string()), true).mapping(), false); assertFalse(mergeResult.hasConflicts()); CompressedString mappingAfterMerge = indexService.mapperService().documentMapper("type").refreshSource(); assertThat(mappingAfterMerge, equalTo(new CompressedString("{\"type\":{\"_ttl\":{\"enabled\":false},\"properties\":{\"field\":{\"type\":\"string\"}}}}"))); @@ -241,7 +238,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { IndexService indexService = createIndex("testindex", ImmutableSettings.settingsBuilder().build(), "type", mappingWithTtl); CompressedString mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource(); XContentBuilder mappingWithTtlDifferentDefault = getMappingWithTtlEnabled("7d"); - DocumentMapper.MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new 
CompressedString(mappingWithTtlDifferentDefault.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(true)); + MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlDifferentDefault.string()), true).mapping(), true); assertFalse(mergeResult.hasConflicts()); // make sure simulate flag actually worked - no mappings applied CompressedString mappingAfterMerge = indexService.mapperService().documentMapper("type").refreshSource(); @@ -253,7 +250,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { indexService = createIndex("testindex", ImmutableSettings.settingsBuilder().build(), "type", mappingWithoutTtl); mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource(); XContentBuilder mappingWithTtlEnabled = getMappingWithTtlEnabled(); - mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(true)); + mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), true); assertFalse(mergeResult.hasConflicts()); // make sure simulate flag actually worked - no mappings applied mappingAfterMerge = indexService.mapperService().documentMapper("type").refreshSource(); @@ -265,7 +262,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { indexService = createIndex("testindex", ImmutableSettings.settingsBuilder().build(), "type", mappingWithoutTtl); mappingBeforeMerge = indexService.mapperService().documentMapper("type").mappingSource(); mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); - mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(true)); + mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), true); assertFalse(mergeResult.hasConflicts()); // make sure simulate flag actually worked - no mappings applied mappingAfterMerge = indexService.mapperService().documentMapper("type").refreshSource(); @@ -276,7 +273,7 @@ public class TTLMappingTests extends ElasticsearchSingleNodeTest { mappingWithoutTtl = getMappingWithTtlDisabled("6d"); indexService = createIndex("testindex", ImmutableSettings.settingsBuilder().build(), "type", mappingWithoutTtl); mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); - mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), false); assertFalse(mergeResult.hasConflicts()); // make sure simulate flag actually worked - mappings applied mappingAfterMerge = indexService.mapperService().documentMapper("type").refreshSource(); @@ -286,7 +283,7 @@ public class TTLMappingTests extends 
ElasticsearchSingleNodeTest { // check if switching simulate flag off works if nothing was applied in the beginning indexService = createIndex("testindex", ImmutableSettings.settingsBuilder().build(), "type"); mappingWithTtlEnabled = getMappingWithTtlEnabled("7d"); - mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingWithTtlEnabled.string()), true).mapping(), false); assertFalse(mergeResult.hasConflicts()); // make sure simulate flag actually worked - mappings applied mappingAfterMerge = indexService.mapperService().documentMapper("type").refreshSource(); diff --git a/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java b/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java index aa227fd7cce..7c12cd14c36 100644 --- a/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java +++ b/src/test/java/org/elasticsearch/index/mapper/update/UpdateMappingTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.mapper.MergeResult; import org.elasticsearch.test.ElasticsearchSingleNodeTest; import org.junit.Test; @@ -79,9 +80,9 @@ public class UpdateMappingTests extends ElasticsearchSingleNodeTest { private void testNoConflictWhileMergingAndMappingChanged(XContentBuilder mapping, XContentBuilder mappingUpdate, XContentBuilder expectedMapping) throws IOException { IndexService indexService = createIndex("test", ImmutableSettings.settingsBuilder().build(), "type", mapping); // simulate like in MetaDataMappingService#putMapping - DocumentMapper.MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingUpdate.bytes()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(false)); + MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingUpdate.bytes()), true).mapping(), false); // assure we have no conflicts - assertThat(mergeResult.conflicts().length, equalTo(0)); + assertThat(mergeResult.buildConflicts().length, equalTo(0)); // make sure mappings applied CompressedString mappingAfterUpdate = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterUpdate.toString(), equalTo(expectedMapping.string())); @@ -103,9 +104,9 @@ public class UpdateMappingTests extends ElasticsearchSingleNodeTest { IndexService indexService = createIndex("test", ImmutableSettings.settingsBuilder().build(), "type", mapping); CompressedString mappingBeforeUpdate = indexService.mapperService().documentMapper("type").mappingSource(); // simulate like in MetaDataMappingService#putMapping - DocumentMapper.MergeResult mergeResult = indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingUpdate.bytes()), true).mapping(), DocumentMapper.MergeFlags.mergeFlags().simulate(true)); + MergeResult mergeResult = 
indexService.mapperService().documentMapper("type").merge(indexService.mapperService().parse("type", new CompressedString(mappingUpdate.bytes()), true).mapping(), true); // assure we have conflicts - assertThat(mergeResult.conflicts().length, equalTo(1)); + assertThat(mergeResult.buildConflicts().length, equalTo(1)); // make sure simulate flag actually worked - no mappings applied CompressedString mappingAfterUpdate = indexService.mapperService().documentMapper("type").mappingSource(); assertThat(mappingAfterUpdate, equalTo(mappingBeforeUpdate)); diff --git a/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java b/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java index a045f3bbb6e..00cca381db2 100644 --- a/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java +++ b/src/test/java/org/elasticsearch/index/merge/policy/MergePolicySettingsTest.java @@ -20,10 +20,8 @@ package org.elasticsearch.index.merge.policy; import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.LogDocMergePolicy; -import org.apache.lucene.index.TieredMergePolicy; import org.apache.lucene.store.Directory; import org.apache.lucene.store.RAMDirectory; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -94,19 +92,19 @@ public class MergePolicySettingsTest extends ElasticsearchTestCase { try { new LogDocMergePolicyProvider(createStore(build(-0.1)), service).getMergePolicy().getNoCFSRatio(); fail("exception expected"); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { } try { new LogDocMergePolicyProvider(createStore(build(1.1)), service).getMergePolicy().getNoCFSRatio(); fail("exception expected"); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { } try { new LogDocMergePolicyProvider(createStore(build("Falsch")), service).getMergePolicy().getNoCFSRatio(); fail("exception expected"); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { } diff --git a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java index 6e928ca4f81..b2964807d87 100644 --- a/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java +++ b/src/test/java/org/elasticsearch/index/query/SimpleIndexQueryParserTests.java @@ -51,12 +51,14 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TermRangeQuery; import org.apache.lucene.search.WildcardQuery; import org.apache.lucene.search.spans.FieldMaskingSpanQuery; +import org.apache.lucene.search.spans.SpanContainingQuery; import org.apache.lucene.search.spans.SpanFirstQuery; import org.apache.lucene.search.spans.SpanMultiTermQueryWrapper; import org.apache.lucene.search.spans.SpanNearQuery; import org.apache.lucene.search.spans.SpanNotQuery; import org.apache.lucene.search.spans.SpanOrQuery; import org.apache.lucene.search.spans.SpanTermQuery; +import org.apache.lucene.search.spans.SpanWithinQuery; import org.apache.lucene.spatial.prefix.IntersectsPrefixTreeFilter; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; @@ -64,7 +66,6 @@ import org.apache.lucene.util.CharsRefBuilder; import 
org.apache.lucene.util.NumericUtils; import org.apache.lucene.util.automaton.TooComplexToDeterminizeException; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.termvectors.MultiTermVectorsItemResponse; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; @@ -90,7 +91,6 @@ import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.core.NumberFieldMapper; import org.elasticsearch.index.search.NumericRangeFieldDataFilter; -import org.elasticsearch.index.search.child.CustomQueryWrappingFilter; import org.elasticsearch.index.search.child.ParentConstantScoreQuery; import org.elasticsearch.index.search.geo.GeoDistanceFilter; import org.elasticsearch.index.search.geo.GeoPolygonFilter; @@ -132,11 +132,13 @@ import static org.elasticsearch.index.query.QueryBuilders.prefixQuery; import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery; import static org.elasticsearch.index.query.QueryBuilders.rangeQuery; import static org.elasticsearch.index.query.QueryBuilders.regexpQuery; +import static org.elasticsearch.index.query.QueryBuilders.spanContainingQuery; import static org.elasticsearch.index.query.QueryBuilders.spanFirstQuery; import static org.elasticsearch.index.query.QueryBuilders.spanNearQuery; import static org.elasticsearch.index.query.QueryBuilders.spanNotQuery; import static org.elasticsearch.index.query.QueryBuilders.spanOrQuery; import static org.elasticsearch.index.query.QueryBuilders.spanTermQuery; +import static org.elasticsearch.index.query.QueryBuilders.spanWithinQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; import static org.elasticsearch.index.query.QueryBuilders.termsQuery; import static org.elasticsearch.index.query.QueryBuilders.wildcardQuery; @@ -464,7 +466,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { public void testTermQueryBuilder() throws IOException { IndexQueryParserService queryParser = queryParser(); Query parsedQuery = queryParser.parse(termQuery("age", 34).buildAsBytes()).query(); - TermQuery fieldQuery = unwrapTermQuery(parsedQuery, true); + TermQuery fieldQuery = unwrapTermQuery(parsedQuery); assertThat(fieldQuery.getTerm().bytes(), equalTo(indexedValueForSearch(34l))); } @@ -472,15 +474,11 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { public void testTermQuery() throws IOException { IndexQueryParserService queryParser = queryParser(); String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term.json"); - TermQuery fieldQuery = unwrapTermQuery(queryParser.parse(query).query(), true); + TermQuery fieldQuery = unwrapTermQuery(queryParser.parse(query).query()); assertThat(fieldQuery.getTerm().bytes(), equalTo(indexedValueForSearch(34l))); } - private static TermQuery unwrapTermQuery(Query q, boolean expectConstantWrapper) { - if (expectConstantWrapper) { - assertThat(q, instanceOf(ConstantScoreQuery.class)); - q = ((ConstantScoreQuery) q).getQuery(); - } + private static TermQuery unwrapTermQuery(Query q) { assertThat(q, instanceOf(TermQuery.class)); return (TermQuery) q; } @@ -545,7 +543,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { IndexQueryParserService queryParser = queryParser(); Query parsedQuery = 
queryParser.parse(termQuery("age", 34).boost(2.0f)).query(); - TermQuery fieldQuery = unwrapTermQuery(parsedQuery, true); + TermQuery fieldQuery = unwrapTermQuery(parsedQuery); assertThat(fieldQuery.getTerm().bytes(), equalTo(indexedValueForSearch(34l))); assertThat((double) parsedQuery.getBoost(), closeTo(2.0, 0.01)); } @@ -563,7 +561,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { IndexQueryParserService queryParser = queryParser(); String query = copyToStringFromClasspath("/org/elasticsearch/index/query/term-with-boost.json"); Query parsedQuery = queryParser.parse(query).query(); - TermQuery fieldQuery = unwrapTermQuery(parsedQuery, true); + TermQuery fieldQuery = unwrapTermQuery(parsedQuery); assertThat(fieldQuery.getTerm().bytes(), equalTo(indexedValueForSearch(34l))); assertThat((double) parsedQuery.getBoost(), closeTo(2.0, 0.01)); } @@ -862,7 +860,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), rangeFilter("age").from(23).to(54).includeLower(true).includeUpper(false))).query(); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); + new QueryWrapperFilter(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); assertEquals(expected, parsedQuery); } @@ -873,7 +871,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(query).query(); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); + new QueryWrapperFilter(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); assertEquals(expected, parsedQuery); } @@ -884,7 +882,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(query).query(); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); + new QueryWrapperFilter(NumericRangeQuery.newLongRange("age", 23L, 54L, true, false))); assertEquals(expected, parsedQuery); } @@ -909,14 +907,14 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(filteredQuery(termQuery("name.first", "shay"), boolFilter().must(termFilter("name.first", "shay1"), termFilter("name.first", "shay4")).mustNot(termFilter("name.first", "shay2")).should(termFilter("name.first", "shay3")))).query(); BooleanQuery filter = new BooleanQuery(); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay2"))), Occur.MUST_NOT); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay3"))), Occur.SHOULD); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay2"))), Occur.MUST_NOT); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay3"))), Occur.SHOULD); 
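// ---------------------------------------------------------------------------
// Editor's sketch: the filter-wrapping idiom behind these expected queries.
// The removed Queries.wrap(...) helper is replaced throughout this patch by
// Lucene's own QueryWrapperFilter, which adapts any Query to the Filter
// interface. A minimal, hedged sketch assuming Lucene 5.x (where Filter
// extends Query, so a wrapped filter can be added directly as a BooleanQuery
// clause); the field names below are illustrative, not part of the patch.
//
//   import org.apache.lucene.index.Term;
//   import org.apache.lucene.search.*;
//   import org.apache.lucene.search.BooleanClause.Occur;
//
//   static Query expectedBoolFilteredQuery() {
//       BooleanQuery bool = new BooleanQuery();
//       // Each term clause is adapted to a Filter before being added.
//       bool.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST);
//       bool.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay2"))), Occur.MUST_NOT);
//       // FilteredQuery keeps the main query's scores; the filter only restricts matches.
//       return new FilteredQuery(new TermQuery(new Term("name.first", "shay")),
//               new QueryWrapperFilter(bool));
//   }
// ---------------------------------------------------------------------------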
filter.setMinimumNumberShouldMatch(1); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(filter)); + new QueryWrapperFilter(filter)); assertEquals(expected, parsedQuery); } @@ -927,14 +925,14 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/bool-filter.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery filter = new BooleanQuery(); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay2"))), Occur.MUST_NOT); - filter.add(Queries.wrap(new TermQuery(new Term("name.first", "shay3"))), Occur.SHOULD); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay2"))), Occur.MUST_NOT); + filter.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay3"))), Occur.SHOULD); filter.setMinimumNumberShouldMatch(1); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(filter)); + new QueryWrapperFilter(filter)); assertEquals(expected, parsedQuery); } @@ -943,9 +941,9 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { IndexQueryParserService queryParser = queryParser(); Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), andFilter(termFilter("name.first", "shay1"), termFilter("name.first", "shay4")))).query(); BooleanQuery and = new BooleanQuery(); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); - ConstantScoreQuery expected = new ConstantScoreQuery(Queries.wrap(and)); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + ConstantScoreQuery expected = new ConstantScoreQuery(new QueryWrapperFilter(and)); assertEquals(expected, parsedQuery); } @@ -955,11 +953,11 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery and = new BooleanQuery(); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(and)); + new QueryWrapperFilter(and)); assertEquals(expected, parsedQuery); } @@ -969,11 +967,11 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter-named.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery and = new BooleanQuery(); - 
and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(and)); + new QueryWrapperFilter(and)); assertEquals(expected, parsedQuery); } @@ -983,11 +981,11 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/and-filter2.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery and = new BooleanQuery(); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); - and.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.MUST); + and.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.MUST); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(and)); + new QueryWrapperFilter(and)); assertEquals(expected, parsedQuery); } @@ -996,9 +994,9 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { IndexQueryParserService queryParser = queryParser(); Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), orFilter(termFilter("name.first", "shay1"), termFilter("name.first", "shay4")))).query(); BooleanQuery or = new BooleanQuery(); - or.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); - or.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.SHOULD); - ConstantScoreQuery expected = new ConstantScoreQuery(Queries.wrap(or)); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.SHOULD); + ConstantScoreQuery expected = new ConstantScoreQuery(new QueryWrapperFilter(or)); assertEquals(expected, parsedQuery); } @@ -1008,11 +1006,11 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/or-filter.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery or = new BooleanQuery(); - or.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); - or.add(Queries.wrap(new TermQuery(new Term("name.first", "shay4"))), Occur.SHOULD); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.SHOULD); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(or)); + new QueryWrapperFilter(or)); assertEquals(expected, parsedQuery); } @@ -1022,11 +1020,11 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/or-filter2.json"); Query parsedQuery = queryParser.parse(query).query(); BooleanQuery or = new BooleanQuery(); - or.add(Queries.wrap(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); - or.add(Queries.wrap(new TermQuery(new Term("name.first", 
"shay4"))), Occur.SHOULD); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1"))), Occur.SHOULD); + or.add(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay4"))), Occur.SHOULD); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(or)); + new QueryWrapperFilter(or)); assertEquals(expected, parsedQuery); } @@ -1034,7 +1032,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { public void testNotFilteredQueryBuilder() throws IOException { IndexQueryParserService queryParser = queryParser(); Query parsedQuery = queryParser.parse(filteredQuery(matchAllQuery(), notFilter(termFilter("name.first", "shay1")))).query(); - ConstantScoreQuery expected = new ConstantScoreQuery(Queries.wrap(Queries.not(Queries.wrap(new TermQuery(new Term("name.first", "shay1")))))); + ConstantScoreQuery expected = new ConstantScoreQuery(new QueryWrapperFilter(Queries.not(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1")))))); assertEquals(expected, parsedQuery); } @@ -1046,7 +1044,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { assertThat(parsedQuery, instanceOf(FilteredQuery.class)); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(Queries.not(Queries.wrap(new TermQuery(new Term("name.first", "shay1")))))); + new QueryWrapperFilter(Queries.not(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1")))))); assertEquals(expected, parsedQuery); } @@ -1057,7 +1055,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(query).query(); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(Queries.not(Queries.wrap(new TermQuery(new Term("name.first", "shay1")))))); + new QueryWrapperFilter(Queries.not(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1")))))); assertEquals(expected, parsedQuery); } @@ -1068,7 +1066,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { Query parsedQuery = queryParser.parse(query).query(); FilteredQuery expected = new FilteredQuery( new TermQuery(new Term("name.first", "shay")), - Queries.wrap(Queries.not(Queries.wrap(new TermQuery(new Term("name.first", "shay1")))))); + new QueryWrapperFilter(Queries.not(new QueryWrapperFilter(new TermQuery(new Term("name.first", "shay1")))))); assertEquals(expected, parsedQuery); } @@ -1191,7 +1189,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { queryParser.parse(query).query(); fail(); } catch (QueryParsingException ex) { - assertThat(ex.getMessage(), equalTo("[test] [terms] query does not support multiple fields")); + assertThat(ex.getMessage(), equalTo("[terms] query does not support multiple fields")); } } @@ -1207,7 +1205,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { queryParser.parse(query).query(); fail(); } catch (QueryParsingException ex) { - assertThat(ex.getMessage(), equalTo("[test] [terms] filter does not support multiple fields")); + assertThat(ex.getMessage(), equalTo("[terms] filter does not support multiple fields")); } } @@ -1439,6 +1437,50 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { assertThat(((SpanTermQuery) spanNotQuery.getExclude()).getTerm(), equalTo(new Term("age", longToPrefixCoded(35, 0)))); } + @Test + public void 
testSpanWithinQueryBuilder() throws IOException { + IndexQueryParserService queryParser = queryParser(); + Query expectedQuery = new SpanWithinQuery(new SpanTermQuery(new Term("age", longToPrefixCoded(34, 0))), + new SpanTermQuery(new Term("age", longToPrefixCoded(35, 0)))); + Query actualQuery = queryParser.parse(spanWithinQuery() + .big(spanTermQuery("age", 34)) + .little(spanTermQuery("age", 35))) + .query(); + assertEquals(expectedQuery, actualQuery); + } + + @Test + public void testSpanWithinQueryParser() throws IOException { + IndexQueryParserService queryParser = queryParser(); + Query expectedQuery = new SpanWithinQuery(new SpanTermQuery(new Term("age", longToPrefixCoded(34, 0))), + new SpanTermQuery(new Term("age", longToPrefixCoded(35, 0)))); + String queryText = copyToStringFromClasspath("/org/elasticsearch/index/query/spanWithin.json"); + Query actualQuery = queryParser.parse(queryText).query(); + assertEquals(expectedQuery, actualQuery); + } + + @Test + public void testSpanContainingQueryBuilder() throws IOException { + IndexQueryParserService queryParser = queryParser(); + Query expectedQuery = new SpanContainingQuery(new SpanTermQuery(new Term("age", longToPrefixCoded(34, 0))), + new SpanTermQuery(new Term("age", longToPrefixCoded(35, 0)))); + Query actualQuery = queryParser.parse(spanContainingQuery() + .big(spanTermQuery("age", 34)) + .little(spanTermQuery("age", 35))) + .query(); + assertEquals(expectedQuery, actualQuery); + } + + @Test + public void testSpanContainingQueryParser() throws IOException { + IndexQueryParserService queryParser = queryParser(); + Query expectedQuery = new SpanContainingQuery(new SpanTermQuery(new Term("age", longToPrefixCoded(34, 0))), + new SpanTermQuery(new Term("age", longToPrefixCoded(35, 0)))); + String queryText = copyToStringFromClasspath("/org/elasticsearch/index/query/spanContaining.json"); + Query actualQuery = queryParser.parse(queryText).query(); + assertEquals(expectedQuery, actualQuery); + } + @Test public void testSpanFirstQueryBuilder() throws IOException { IndexQueryParserService queryParser = queryParser(); @@ -2415,8 +2457,8 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { try { functionScoreQuery().add(factorFunction(2.0f).setWeight(2.0f)); fail("Expect exception here because boost_factor must not have a weight"); - } catch (ElasticsearchIllegalArgumentException e) { - assertThat(e.getDetailedMessage(), containsString(BoostScoreFunction.BOOST_WEIGHT_ERROR_MESSAGE)); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage(), containsString(BoostScoreFunction.BOOST_WEIGHT_ERROR_MESSAGE)); } query = jsonBuilder().startObject().startObject("function_score") .startArray("functions") @@ -2445,7 +2487,7 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { } // https://github.com/elasticsearch/elasticsearch/issues/6722 - public void testEmptyBoolSubClausesIsMatchAll() throws ElasticsearchException, IOException { + public void testEmptyBoolSubClausesIsMatchAll() throws IOException { String query = copyToStringFromClasspath("/org/elasticsearch/index/query/bool-query-with-empty-clauses-for-parsing.json"); IndexService indexService = createIndex("testidx", client().admin().indices().prepareCreate("testidx") .addMapping("foo") @@ -2454,9 +2496,9 @@ public class SimpleIndexQueryParserTests extends ElasticsearchSingleNodeTest { IndexQueryParserService queryParser = indexService.queryParserService(); Query parsedQuery = queryParser.parse(query).query(); 
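// ---------------------------------------------------------------------------
// Editor's sketch: the unwrap chain the assertions just below walk through.
// With CustomQueryWrappingFilter removed, an empty has_parent bool clause now
// parses into plain Lucene layers. A hedged sketch using the Lucene 5.x
// accessors this patch itself relies on (ConstantScoreQuery.getQuery() and
// QueryWrapperFilter.getQuery()); the method name is illustrative.
//
//   static Query unwrapParentQuery(Query parsed) {
//       ConstantScoreQuery constantScore = (ConstantScoreQuery) parsed;          // outermost layer
//       QueryWrapperFilter filter = (QueryWrapperFilter) constantScore.getQuery(); // Filter extends Query in 5.x
//       return filter.getQuery();                                                // here: the ParentConstantScoreQuery
//   }
// ---------------------------------------------------------------------------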
assertThat(parsedQuery, instanceOf(ConstantScoreQuery.class)); - assertThat(((ConstantScoreQuery) parsedQuery).getQuery(), instanceOf(CustomQueryWrappingFilter.class)); - assertThat(((CustomQueryWrappingFilter) ((ConstantScoreQuery) parsedQuery).getQuery()).getQuery(), instanceOf(ParentConstantScoreQuery.class)); - assertThat(((CustomQueryWrappingFilter) ((ConstantScoreQuery) parsedQuery).getQuery()).getQuery().toString(), equalTo("parent_filter[foo](filtered(*:*)->cache(QueryWrapperFilter(_type:foo)))")); + assertThat(((ConstantScoreQuery) parsedQuery).getQuery(), instanceOf(QueryWrapperFilter.class)); + assertThat(((QueryWrapperFilter) ((ConstantScoreQuery) parsedQuery).getQuery()).getQuery(), instanceOf(ParentConstantScoreQuery.class)); + assertThat(((QueryWrapperFilter) ((ConstantScoreQuery) parsedQuery).getQuery()).getQuery().toString(), equalTo("parent_filter[foo](filtered(*:*)->QueryWrapperFilter(_type:foo))")); SearchContext.removeCurrent(); } diff --git a/src/main/java/org/elasticsearch/ElasticsearchIllegalArgumentException.java b/src/test/java/org/elasticsearch/index/query/TestQueryParsingException.java similarity index 59% rename from src/main/java/org/elasticsearch/ElasticsearchIllegalArgumentException.java rename to src/test/java/org/elasticsearch/index/query/TestQueryParsingException.java index fc6d110bf16..951b31e59a6 100644 --- a/src/main/java/org/elasticsearch/ElasticsearchIllegalArgumentException.java +++ b/src/test/java/org/elasticsearch/index/query/TestQueryParsingException.java @@ -17,29 +17,21 @@ * under the License. */ -package org.elasticsearch; +package org.elasticsearch.index.query; -import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.index.Index; /** - * + * Class used to avoid dragging QueryContext into unit testing framework for + * basic exception handling */ -public class ElasticsearchIllegalArgumentException extends ElasticsearchException { +public class TestQueryParsingException extends QueryParsingException { - public ElasticsearchIllegalArgumentException() { - super(null); + public TestQueryParsingException(Index index, int line, int col, String msg, Throwable cause) { + super(index, line, col, msg, cause); } - public ElasticsearchIllegalArgumentException(String msg) { - super(msg); + public TestQueryParsingException(Index index, String msg, Throwable cause) { + super(index, UNKNOWN_POSITION, UNKNOWN_POSITION, msg, cause); } - - public ElasticsearchIllegalArgumentException(String msg, Throwable cause) { - super(msg, cause); - } - - @Override - public RestStatus status() { - return RestStatus.BAD_REQUEST; - } -} \ No newline at end of file +} diff --git a/src/test/java/org/elasticsearch/index/query/spanContaining.json b/src/test/java/org/elasticsearch/index/query/spanContaining.json new file mode 100644 index 00000000000..13f91d88b44 --- /dev/null +++ b/src/test/java/org/elasticsearch/index/query/spanContaining.json @@ -0,0 +1,14 @@ +{ + span_containing:{ + big:{ + span_term:{ + age:34 + } + }, + little:{ + span_term:{ + age:35 + } + } + } +} \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/index/query/spanWithin.json b/src/test/java/org/elasticsearch/index/query/spanWithin.json new file mode 100644 index 00000000000..7cf767cdf12 --- /dev/null +++ b/src/test/java/org/elasticsearch/index/query/spanWithin.json @@ -0,0 +1,14 @@ +{ + span_within:{ + big:{ + span_term:{ + age:34 + } + }, + little:{ + span_term:{ + age:35 + } + } + } +} \ No newline at end of file diff --git 
a/src/test/java/org/elasticsearch/index/search/child/AbstractChildTests.java b/src/test/java/org/elasticsearch/index/search/child/AbstractChildTests.java index 21bae1d20ba..d6aa83c341b 100644 --- a/src/test/java/org/elasticsearch/index/search/child/AbstractChildTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/AbstractChildTests.java @@ -135,10 +135,6 @@ public abstract class AbstractChildTests extends ElasticsearchSingleNodeTest { } } - static Filter wrap(Filter filter) { - return SearchContext.current().filterCache().cache(filter, null, SearchContext.current().indexShard().indexService().queryParserService().autoFilterCachePolicy()); - } - static BitDocIdSetFilter wrapWithBitSetFilter(Filter filter) { return SearchContext.current().bitsetFilterCache().getBitDocIdSetFilter(filter); } diff --git a/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java index 0fce3aa691a..6dff9747127 100644 --- a/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/ChildrenConstantScoreQueryTests.java @@ -38,12 +38,12 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.store.Directory; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.TestUtil; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.engine.Engine; @@ -53,7 +53,6 @@ import org.elasticsearch.index.mapper.internal.ParentFieldMapper; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.TestSearchContext; @@ -94,8 +93,8 @@ public class ChildrenConstantScoreQueryTests extends AbstractChildTests { Query childQuery = new TermQuery(new Term("field", "value")); ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); - BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); - Query query = new ChildrenConstantScoreQuery(parentChildIndexFieldData, childQuery, "parent", "child", parentFilter, 12, wrapWithBitSetFilter(NonNestedDocsFilter.INSTANCE)); + BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); + Query query = new ChildrenConstantScoreQuery(parentChildIndexFieldData, childQuery, "parent", "child", parentFilter, 12, wrapWithBitSetFilter(Queries.newNonNestedFilter())); QueryUtils.check(query); } @@ -127,7 +126,7 @@ public class ChildrenConstantScoreQueryTests extends AbstractChildTests { )); TermQuery 
childQuery = new TermQuery(new Term("field1", "value" + (1 + random().nextInt(3)))); - BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); + BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); int shortCircuitParentDocSet = random().nextInt(5); ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); diff --git a/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java index 07938b53e3f..52ffbf022ea 100644 --- a/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/ChildrenQueryTests.java @@ -45,6 +45,7 @@ import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; @@ -64,7 +65,6 @@ import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.functionscore.fieldvaluefactor.FieldValueFactorFunctionBuilder; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.TestSearchContext; @@ -110,11 +110,11 @@ public class ChildrenQueryTests extends AbstractChildTests { ScoreType scoreType = ScoreType.values()[random().nextInt(ScoreType.values().length)]; ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); - BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); + BitDocIdSetFilter parentFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "parent")))); int minChildren = random().nextInt(10); int maxChildren = scaledRandomIntBetween(minChildren, 10); Query query = new ChildrenQuery(parentChildIndexFieldData, "parent", "child", parentFilter, childQuery, scoreType, minChildren, - maxChildren, 12, wrapWithBitSetFilter(NonNestedDocsFilter.INSTANCE)); + maxChildren, 12, wrapWithBitSetFilter(Queries.newNonNestedFilter())); QueryUtils.check(query); } diff --git a/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java index 49496d8f6e6..feb320942b0 100644 --- a/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/ParentConstantScoreQueryTests.java @@ -38,13 +38,13 @@ import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.IndexSearcher; import 
org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.join.BitDocIdSetFilter; import org.apache.lucene.store.Directory; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; import org.elasticsearch.index.mapper.Uid; @@ -93,7 +93,7 @@ public class ParentConstantScoreQueryTests extends AbstractChildTests { Query parentQuery = new TermQuery(new Term("field", "value")); ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); - BitDocIdSetFilter childrenFilter = wrapWithBitSetFilter(Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, "child")))); + BitDocIdSetFilter childrenFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "child")))); Query query = new ParentConstantScoreQuery(parentChildIndexFieldData, parentQuery, "parent", childrenFilter); QueryUtils.check(query); } diff --git a/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java index 79b4a9bc79e..0614a6c2439 100644 --- a/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/ParentQueryTests.java @@ -42,6 +42,7 @@ import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.MultiCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopScoreDocCollector; import org.apache.lucene.search.join.BitDocIdSetFilter; @@ -49,7 +50,6 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.util.FixedBitSet; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.common.lease.Releasables; -import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; import org.elasticsearch.index.mapper.Uid; @@ -96,7 +96,7 @@ public class ParentQueryTests extends AbstractChildTests { Query parentQuery = new TermQuery(new Term("field", "value")); ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); - BitDocIdSetFilter childrenFilter = wrapWithBitSetFilter(Queries.wrap(new TermQuery(new Term(TypeFieldMapper.NAME, "child")))); + BitDocIdSetFilter childrenFilter = wrapWithBitSetFilter(new QueryWrapperFilter(new TermQuery(new Term(TypeFieldMapper.NAME, "child")))); Query query = new ParentQuery(parentChildIndexFieldData, parentQuery, "parent", childrenFilter); QueryUtils.check(query); } diff --git a/src/test/java/org/elasticsearch/index/search/child/ScoreTypeTests.java b/src/test/java/org/elasticsearch/index/search/child/ScoreTypeTests.java index 
65727baed2b..27d8641789a 100644 --- a/src/test/java/org/elasticsearch/index/search/child/ScoreTypeTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/ScoreTypeTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.index.search.child; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.test.ElasticsearchTestCase; import org.junit.Test; @@ -57,9 +56,9 @@ public class ScoreTypeTests extends ElasticsearchTestCase { } /** - * Should throw {@link ElasticsearchIllegalArgumentException} instead of NPE. + * Should throw {@link IllegalArgumentException} instead of NPE. */ - @Test(expected = ElasticsearchIllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void nullFromString_throwsException() { ScoreType.fromString(null); } @@ -67,7 +66,7 @@ public class ScoreTypeTests extends ElasticsearchTestCase { /** * Failure should not change (and the value should never match anything...). */ - @Test(expected = ElasticsearchIllegalArgumentException.class) + @Test(expected = IllegalArgumentException.class) public void unrecognizedFromString_throwsException() { ScoreType.fromString("unrecognized value"); } diff --git a/src/test/java/org/elasticsearch/index/search/child/TopChildrenQueryTests.java b/src/test/java/org/elasticsearch/index/search/child/TopChildrenQueryTests.java index 6def1d5a752..41750cff10b 100644 --- a/src/test/java/org/elasticsearch/index/search/child/TopChildrenQueryTests.java +++ b/src/test/java/org/elasticsearch/index/search/child/TopChildrenQueryTests.java @@ -24,9 +24,9 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.QueryUtils; import org.apache.lucene.search.TermQuery; import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.fielddata.plain.ParentChildIndexFieldData; import org.elasticsearch.index.mapper.internal.ParentFieldMapper; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.internal.SearchContext; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -56,7 +56,7 @@ public class TopChildrenQueryTests extends AbstractChildTests { ScoreType scoreType = ScoreType.values()[random().nextInt(ScoreType.values().length)]; ParentFieldMapper parentFieldMapper = SearchContext.current().mapperService().documentMapper("child").parentFieldMapper(); ParentChildIndexFieldData parentChildIndexFieldData = SearchContext.current().fieldData().getForField(parentFieldMapper); - Query query = new TopChildrenQuery(parentChildIndexFieldData, childQuery, "child", "parent", scoreType, 1, 1, wrapWithBitSetFilter(NonNestedDocsFilter.INSTANCE)); + Query query = new TopChildrenQuery(parentChildIndexFieldData, childQuery, "child", "parent", scoreType, 1, 1, wrapWithBitSetFilter(Queries.newNonNestedFilter())); QueryUtils.check(query); } diff --git a/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java index 4af03801c94..940e10e77df 100644 --- a/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java +++ b/src/test/java/org/elasticsearch/index/search/nested/AbstractNumberNestedSortingTests.java @@ -30,6 +30,7 @@ import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import 
org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; @@ -216,8 +217,8 @@ public abstract class AbstractNumberNestedSortingTests extends AbstractFieldData MultiValueMode sortMode = MultiValueMode.SUM; IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false)); - Filter parentFilter = Queries.wrap(new TermQuery(new Term("__type", "parent"))); - Filter childFilter = Queries.wrap(Queries.not(parentFilter)); + Filter parentFilter = new QueryWrapperFilter(new TermQuery(new Term("__type", "parent"))); + Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter)); XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, null, createNested(parentFilter, childFilter)); ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None); @@ -251,7 +252,7 @@ public abstract class AbstractNumberNestedSortingTests extends AbstractFieldData assertThat(topDocs.scoreDocs[4].doc, equalTo(3)); assertThat(((Number) ((FieldDoc) topDocs.scoreDocs[4]).fields[0]).intValue(), equalTo(9)); - childFilter = Queries.wrap(new TermQuery(new Term("filter_1", "T"))); + childFilter = new QueryWrapperFilter(new TermQuery(new Term("filter_1", "T"))); nestedComparatorSource = createFieldComparator("field2", sortMode, null, createNested(parentFilter, childFilter)); query = new ToParentBlockJoinQuery( new FilteredQuery(new MatchAllDocsQuery(), childFilter), @@ -328,7 +329,7 @@ public abstract class AbstractNumberNestedSortingTests extends AbstractFieldData protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher) throws IOException { MultiValueMode sortMode = MultiValueMode.AVG; - Filter childFilter = Queries.wrap(Queries.not(parentFilter)); + Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter)); XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(parentFilter, childFilter)); Query query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); diff --git a/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java index 800320323cc..12776cec73a 100644 --- a/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java +++ b/src/test/java/org/elasticsearch/index/search/nested/DoubleNestedSortingTests.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; @@ -69,7 +70,7 @@ public class DoubleNestedSortingTests extends AbstractNumberNestedSortingTests { @Override protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher) throws IOException { MultiValueMode sortMode = MultiValueMode.AVG; - Filter childFilter = Queries.wrap(Queries.not(parentFilter)); + Filter childFilter = new 
QueryWrapperFilter(Queries.not(parentFilter)); XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(parentFilter, childFilter)); Query query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); diff --git a/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java index b1b1433cdfc..12cd10a2cd2 100644 --- a/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java +++ b/src/test/java/org/elasticsearch/index/search/nested/FloatNestedSortingTests.java @@ -27,6 +27,7 @@ import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TopDocs; @@ -68,7 +69,7 @@ public class FloatNestedSortingTests extends DoubleNestedSortingTests { protected void assertAvgScoreMode(Filter parentFilter, IndexSearcher searcher, IndexFieldData.XFieldComparatorSource innerFieldComparator) throws IOException { MultiValueMode sortMode = MultiValueMode.AVG; - Filter childFilter = Queries.wrap(Queries.not(parentFilter)); + Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter)); XFieldComparatorSource nestedComparatorSource = createFieldComparator("field2", sortMode, -127, createNested(parentFilter, childFilter)); Query query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None); Sort sort = new Sort(new SortField("field2", nestedComparatorSource)); diff --git a/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java index 1029523a325..e4885727434 100644 --- a/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java +++ b/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java @@ -33,6 +33,7 @@ import org.apache.lucene.search.FilteredQuery; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; +import org.apache.lucene.search.QueryWrapperFilter; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TermQuery; @@ -117,8 +118,8 @@ public class NestedSortingTests extends AbstractFieldDataTests { } private TopDocs getTopDocs(IndexSearcher searcher, IndexFieldData indexFieldData, String missingValue, MultiValueMode sortMode, int n, boolean reverse) throws IOException { - Filter parentFilter = Queries.wrap(new TermQuery(new Term("__type", "parent"))); - Filter childFilter = Queries.wrap(new TermQuery(new Term("__type", "child"))); + Filter parentFilter = new QueryWrapperFilter(new TermQuery(new Term("__type", "parent"))); + Filter childFilter = new QueryWrapperFilter(new TermQuery(new Term("__type", "child"))); XFieldComparatorSource nestedComparatorSource = indexFieldData.comparatorSource(missingValue, sortMode, createNested(parentFilter, childFilter)); Query query = new ConstantScoreQuery(parentFilter); 
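// ---------------------------------------------------------------------------
// Editor's sketch: the parent/child filter pair these nested-sorting tests
// build repeatedly. Parents are marked __type:parent; children are defined as
// "anything that is not a parent". A hedged sketch assuming Lucene 5.x and
// the org.elasticsearch.common.lucene.search.Queries helper, mirroring the
// calls in the hunks above:
//
//   Filter parentFilter = new QueryWrapperFilter(new TermQuery(new Term("__type", "parent")));
//   Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter));
//   // Join child hits up to their parent block; ScoreMode.None because only
//   // the nested sort values matter here, not child scores.
//   Query joined = new ToParentBlockJoinQuery(
//           new FilteredQuery(new MatchAllDocsQuery(), childFilter),
//           new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None);
// ---------------------------------------------------------------------------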
Sort sort = new Sort(new SortField("f", nestedComparatorSource, reverse)); @@ -283,8 +284,8 @@ public class NestedSortingTests extends AbstractFieldDataTests { MultiValueMode sortMode = MultiValueMode.MIN; IndexSearcher searcher = new IndexSearcher(DirectoryReader.open(writer, false)); PagedBytesIndexFieldData indexFieldData = getForField("field2"); - Filter parentFilter = Queries.wrap(new TermQuery(new Term("__type", "parent"))); - Filter childFilter = Queries.wrap(Queries.not(parentFilter)); + Filter parentFilter = new QueryWrapperFilter(new TermQuery(new Term("__type", "parent"))); + Filter childFilter = new QueryWrapperFilter(Queries.not(parentFilter)); BytesRefFieldComparatorSource nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(parentFilter, childFilter)); ToParentBlockJoinQuery query = new ToParentBlockJoinQuery(new FilteredQuery(new MatchAllDocsQuery(), childFilter), new BitDocIdSetCachingWrapperFilter(parentFilter), ScoreMode.None); @@ -324,7 +325,7 @@ public class NestedSortingTests extends AbstractFieldDataTests { BooleanQuery bq = new BooleanQuery(); bq.add(parentFilter, Occur.MUST_NOT); bq.add(new TermQuery(new Term("filter_1", "T")), Occur.MUST); - childFilter = Queries.wrap(bq); + childFilter = new QueryWrapperFilter(bq); nestedComparatorSource = new BytesRefFieldComparatorSource(indexFieldData, null, sortMode, createNested(parentFilter, childFilter)); query = new ToParentBlockJoinQuery( new FilteredQuery(new MatchAllDocsQuery(), childFilter), diff --git a/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index d109e558beb..e17721a0f00 100644 --- a/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -18,8 +18,6 @@ */ package org.elasticsearch.index.shard; -import org.elasticsearch.ElasticsearchIllegalArgumentException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.routing.MutableShardRouting; import org.elasticsearch.cluster.routing.ShardRouting; @@ -58,7 +56,7 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest { try { assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder().put(IndexShard.INDEX_FLUSH_ON_CLOSE, "FOOBAR").build())); fail("exception expected"); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { } assertEquals(newValue, shard.isFlushOnClose()); @@ -147,7 +145,7 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest { try { shard.deleteShardState(); fail("shard is active metadata delete must fail"); - } catch (ElasticsearchIllegalStateException ex) { + } catch (IllegalStateException ex) { // fine - only delete if non-active } diff --git a/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java b/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java index 39f48f8d637..ccf87f2efef 100644 --- a/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java +++ b/src/test/java/org/elasticsearch/index/shard/ShardPathTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.index.shard; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.ImmutableSettings; import 
org.elasticsearch.common.settings.Settings; @@ -52,7 +51,7 @@ public class ShardPathTests extends ElasticsearchTestCase { } } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailLoadShardPathOnMultiState() throws IOException { try (final NodeEnvironment env = newNodeEnvironment(settingsBuilder().build())) { ImmutableSettings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_UUID, "0xDEADBEEF"); @@ -66,7 +65,7 @@ public class ShardPathTests extends ElasticsearchTestCase { } } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void testFailLoadShardPathIndexUUIDMissmatch() throws IOException { try (final NodeEnvironment env = newNodeEnvironment(settingsBuilder().build())) { ImmutableSettings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_UUID, "foobar"); diff --git a/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java b/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java new file mode 100644 index 00000000000..a063edf7a8f --- /dev/null +++ b/src/test/java/org/elasticsearch/index/store/IndexStoreTests.java @@ -0,0 +1,86 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.index.store; + +import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.apache.lucene.store.*; +import org.apache.lucene.util.Constants; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.shard.ShardPath; +import org.elasticsearch.test.ElasticsearchTestCase; + +import java.io.IOException; +import java.nio.file.Path; + +/** + */ +public class IndexStoreTests extends ElasticsearchTestCase { + + public void testStoreDirectory() throws IOException { + final Path tempDir = createTempDir(); + final IndexStoreModule.Type[] values = IndexStoreModule.Type.values(); + final IndexStoreModule.Type type = RandomPicks.randomFrom(random(), values); + Settings settings = ImmutableSettings.settingsBuilder().put(IndexStoreModule.STORE_TYPE, type.name()).build(); + FsDirectoryService service = new FsDirectoryService(settings, null, new ShardPath(tempDir, tempDir, "foo", new ShardId("foo", 0))); + try (final Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE)) { + switch (type) { + case NIOFS: + assertTrue(type + " " + directory.toString(), directory instanceof NIOFSDirectory); + break; + case MMAPFS: + assertTrue(type + " " + directory.toString(), directory instanceof MMapDirectory); + break; + case SIMPLEFS: + assertTrue(type + " " + directory.toString(), directory instanceof SimpleFSDirectory); + break; + case FS: + case DEFAULT: + if (Constants.WINDOWS) { + if (Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) { + assertTrue(type + " " + directory.toString(), directory instanceof MMapDirectory); + } else { + assertTrue(type + " " + directory.toString(), directory instanceof SimpleFSDirectory); + } + } else if (Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) { + assertTrue(type + " " + directory.toString(), directory instanceof FileSwitchDirectory); + } else { + assertTrue(type + " " + directory.toString(), directory instanceof NIOFSDirectory); + } + break; + } + } + } + + public void testStoreDirectoryDefault() throws IOException { + final Path tempDir = createTempDir(); + Settings settings = ImmutableSettings.EMPTY; + FsDirectoryService service = new FsDirectoryService(settings, null, new ShardPath(tempDir, tempDir, "foo", new ShardId("foo", 0))); + try (final Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE)) { + if (Constants.WINDOWS) { + assertTrue(directory.toString(), directory instanceof MMapDirectory || directory instanceof SimpleFSDirectory); + } else { + assertTrue(directory.toString(), directory instanceof FileSwitchDirectory); + } + } + } + + +} diff --git a/src/test/java/org/elasticsearch/index/store/StoreTest.java b/src/test/java/org/elasticsearch/index/store/StoreTest.java index 3a60b62ea3a..c4e7ae9a7ea 100644 --- a/src/test/java/org/elasticsearch/index/store/StoreTest.java +++ b/src/test/java/org/elasticsearch/index/store/StoreTest.java @@ -270,7 +270,7 @@ public class StoreTest extends ElasticsearchTestCase { Store.LegacyChecksums checksums = new Store.LegacyChecksums(); Map legacyMeta = new HashMap<>(); for (String file : store.directory().listAll()) { - if (file.equals("write.lock") || file.equals(IndexFileNames.OLD_SEGMENTS_GEN)) { + if (file.equals("write.lock") || file.equals(IndexFileNames.OLD_SEGMENTS_GEN) || file.startsWith("extra")) { continue; } BytesRef hash = new BytesRef(); diff --git 
a/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsTests.java b/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsTests.java index 1273b255fc1..18a9e382d9f 100644 --- a/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsTests.java +++ b/src/test/java/org/elasticsearch/index/suggest/stats/SuggestStatsTests.java @@ -39,12 +39,14 @@ import java.util.Set; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; +import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope.SUITE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSuccessful; import static org.hamcrest.Matchers.*; /** */ +@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2) public class SuggestStatsTests extends ElasticsearchIntegrationTest { @Override diff --git a/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java b/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java index e24e992c5a3..1a5aa984455 100644 --- a/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java +++ b/src/test/java/org/elasticsearch/index/translog/AbstractSimpleTranslogTests.java @@ -332,6 +332,18 @@ public abstract class AbstractSimpleTranslogTests extends ElasticsearchTestCase snapshot.close(); } + public void testSnapshotOnClosedTranslog() throws IOException { + assertTrue(Files.exists(translogDir.resolve("translog-1"))); + translog.add(new Translog.Create("test", "1", new byte[]{1})); + translog.close(); + try { + Translog.Snapshot snapshot = translog.snapshot(); + fail("translog is closed"); + } catch (TranslogException ex) { + assertEquals("current translog is already closed", ex.getMessage()); + } + } + @Test public void deleteOnRollover() throws IOException { translog.add(new Translog.Create("test", "1", new byte[]{1})); diff --git a/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java b/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java index d5798aa94f1..4c422448ede 100644 --- a/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java +++ b/src/test/java/org/elasticsearch/indices/IndicesOptionsIntegrationTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.indices; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequestBuilder; @@ -39,7 +38,6 @@ import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryRequestBuilder; import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersRequestBuilder; import org.elasticsearch.action.count.CountRequestBuilder; -import org.elasticsearch.action.deletebyquery.DeleteByQueryRequestBuilder; import org.elasticsearch.action.percolate.MultiPercolateRequestBuilder; import org.elasticsearch.action.percolate.PercolateRequestBuilder; import org.elasticsearch.action.percolate.PercolateSourceBuilder; @@ -85,7 +83,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1", "test2"), true);
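Aside: the new testSnapshotOnClosedTranslog hunk above, like most tests touched by this change, asserts failures with the try/fail()/catch idiom, now catching plain JDK exception types (or subclasses such as TranslogException) instead of the removed Elasticsearch-specific ones. A minimal, self-contained sketch of that idiom follows; ClosableResource is an invented stand-in for the translog, not a real Elasticsearch class:

```java
// Minimal sketch of the expected-exception idiom used throughout these hunks.
// ClosableResource is hypothetical; only the shape of the pattern matters.
public class ExpectedExceptionPattern {

    static class ClosableResource {
        private boolean closed;

        void close() {
            closed = true;
        }

        void snapshot() {
            if (closed) {
                // JDK exception type, matching the PR-wide move away from
                // ElasticsearchIllegalStateException.
                throw new IllegalStateException("current translog is already closed");
            }
        }
    }

    public static void main(String[] args) {
        ClosableResource resource = new ClosableResource();
        resource.close();
        try {
            resource.snapshot();
            // AssertionError is not an IllegalStateException, so it escapes the catch below.
            throw new AssertionError("snapshot() on a closed resource must throw");
        } catch (IllegalStateException e) {
            // Expected; with JUnit's assertEquals the expected message goes first:
            // assertEquals("current translog is already closed", e.getMessage());
            System.out.println("rejected as expected: " + e.getMessage());
        }
    }
}
```

Where the exact message does not matter, JUnit's @Test(expected = ...) does the same job, which is why the ShardPathTests hunks earlier only swap the exception class inside the annotation.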
verify(aliasExists("test1", "test2"), true); verify(typesExists("test1", "test2"), true); - verify(deleteByQuery("test1", "test2"), true); verify(percolate("test1", "test2"), true); verify(mpercolate(null, "test1", "test2"), false); verify(suggest("test1", "test2"), true); @@ -108,7 +105,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1", "test2").setIndicesOptions(options), true); verify(aliasExists("test1", "test2").setIndicesOptions(options), true); verify(typesExists("test1", "test2").setIndicesOptions(options), true); - verify(deleteByQuery("test1", "test2").setIndicesOptions(options), true); verify(percolate("test1", "test2").setIndicesOptions(options), true); verify(mpercolate(options, "test1", "test2").setIndicesOptions(options), false); verify(suggest("test1", "test2").setIndicesOptions(options), true); @@ -131,7 +127,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1", "test2").setIndicesOptions(options), false); verify(aliasExists("test1", "test2").setIndicesOptions(options), false); verify(typesExists("test1", "test2").setIndicesOptions(options), false); - verify(deleteByQuery("test1", "test2").setIndicesOptions(options), false); verify(percolate("test1", "test2").setIndicesOptions(options), false); verify(mpercolate(options, "test1", "test2").setIndicesOptions(options), false); verify(suggest("test1", "test2").setIndicesOptions(options), false); @@ -156,7 +151,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1", "test2").setIndicesOptions(options), false); verify(aliasExists("test1", "test2").setIndicesOptions(options), false); verify(typesExists("test1", "test2").setIndicesOptions(options), false); - verify(deleteByQuery("test1", "test2").setIndicesOptions(options), false); verify(percolate("test1", "test2").setIndicesOptions(options), false); verify(mpercolate(options, "test1", "test2").setIndicesOptions(options), false); verify(suggest("test1", "test2").setIndicesOptions(options), false); @@ -190,7 +184,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1").setIndicesOptions(options), true); verify(aliasExists("test1").setIndicesOptions(options), true); verify(typesExists("test1").setIndicesOptions(options), true); - verify(deleteByQuery("test1").setIndicesOptions(options), true); verify(percolate("test1").setIndicesOptions(options), true); verify(mpercolate(options, "test1").setIndicesOptions(options), true); verify(suggest("test1").setIndicesOptions(options), true); @@ -213,7 +206,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1").setIndicesOptions(options), false); verify(aliasExists("test1").setIndicesOptions(options), false); verify(typesExists("test1").setIndicesOptions(options), false); - verify(deleteByQuery("test1").setIndicesOptions(options), false); verify(percolate("test1").setIndicesOptions(options), false); verify(mpercolate(options, "test1").setIndicesOptions(options), false); verify(suggest("test1").setIndicesOptions(options), false); @@ -239,7 +231,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1").setIndicesOptions(options), false); verify(aliasExists("test1").setIndicesOptions(options), false); verify(typesExists("test1").setIndicesOptions(options), false); - 
verify(deleteByQuery("test1").setIndicesOptions(options), false); verify(percolate("test1").setIndicesOptions(options), false); verify(mpercolate(options, "test1").setIndicesOptions(options), false); verify(suggest("test1").setIndicesOptions(options), false); @@ -265,7 +256,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1").setIndicesOptions(options), true); verify(aliasExists("test1").setIndicesOptions(options), true); verify(typesExists("test1").setIndicesOptions(options), true); - verify(deleteByQuery("test1").setIndicesOptions(options), true); verify(percolate("test1").setIndicesOptions(options), true); verify(suggest("test1").setIndicesOptions(options), true); verify(getAliases("test1").setIndicesOptions(options), true); @@ -287,7 +277,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1").setIndicesOptions(options), false); verify(aliasExists("test1").setIndicesOptions(options), false); verify(typesExists("test1").setIndicesOptions(options), false); - verify(deleteByQuery("test1").setIndicesOptions(options), false); verify(percolate("test1").setIndicesOptions(options), false); verify(suggest("test1").setIndicesOptions(options), false); verify(getAliases("test1").setIndicesOptions(options), false); @@ -312,7 +301,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery("test1").setIndicesOptions(options), false); verify(aliasExists("test1").setIndicesOptions(options), false); verify(typesExists("test1").setIndicesOptions(options), false); - verify(deleteByQuery("test1").setIndicesOptions(options), false); verify(percolate("test1").setIndicesOptions(options), false); verify(suggest("test1").setIndicesOptions(options), false); verify(getAliases("test1").setIndicesOptions(options), false); @@ -369,7 +357,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery(indices), true); verify(aliasExists(indices), false); verify(typesExists(indices), false); - verify(deleteByQuery(indices), true); verify(percolate(indices), false); verify(mpercolate(null, indices), false); verify(suggest(indices), false); @@ -393,7 +380,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery(indices).setIndicesOptions(options), false); verify(aliasExists(indices).setIndicesOptions(options), false); verify(typesExists(indices).setIndicesOptions(options), false); - verify(deleteByQuery(indices).setIndicesOptions(options), false); verify(percolate(indices).setIndicesOptions(options), false); verify(mpercolate(options, indices), false); verify(suggest(indices).setIndicesOptions(options), false); @@ -420,7 +406,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery(indices), false); verify(aliasExists(indices), false); verify(typesExists(indices), false); - verify(deleteByQuery(indices), false); verify(percolate(indices), false); verify(mpercolate(null, indices), false); verify(suggest(indices), false); @@ -444,7 +429,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery(indices), true); verify(aliasExists(indices), false); verify(typesExists(indices), false); - verify(deleteByQuery(indices), true); verify(percolate(indices), false); verify(mpercolate(null, indices), false); verify(suggest(indices), false); @@ -468,7 +452,6 @@ public class 
IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest verify(validateQuery(indices).setIndicesOptions(options), false); verify(aliasExists(indices).setIndicesOptions(options), false); verify(typesExists(indices).setIndicesOptions(options), false); - verify(deleteByQuery(indices).setIndicesOptions(options), false); verify(percolate(indices).setIndicesOptions(options), false); verify(mpercolate(options, indices), false); verify(suggest(indices).setIndicesOptions(options), false); @@ -731,25 +714,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest assertThat(client().admin().indices().prepareGetWarmers("barbaz").setWarmers("test1").get().getWarmers().size(), equalTo(0)); } - @Test - // Indices exists never throws IndexMissingException, the indices options control its behaviour (return true or false) - public void testIndicesExists() throws Exception { - assertThat(client().admin().indices().prepareExists("foo").get().isExists(), equalTo(false)); - assertThat(client().admin().indices().prepareExists("foo").setIndicesOptions(IndicesOptions.lenientExpandOpen()).get().isExists(), equalTo(true)); - assertThat(client().admin().indices().prepareExists("foo*").get().isExists(), equalTo(false)); - assertThat(client().admin().indices().prepareExists("foo*").setIndicesOptions(IndicesOptions.fromOptions(false, true, true, false)).get().isExists(), equalTo(true)); - assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(false)); - - createIndex("foo", "foobar", "bar", "barbaz"); - ensureYellow(); - - assertThat(client().admin().indices().prepareExists("foo*").get().isExists(), equalTo(true)); - assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(true)); - assertThat(client().admin().indices().prepareExists("bar*").get().isExists(), equalTo(true)); - assertThat(client().admin().indices().prepareExists("bar").get().isExists(), equalTo(true)); - assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(true)); - } - @Test public void testPutMapping() throws Exception { verify(client().admin().indices().preparePutMapping("foo").setType("type1").setSource("field", "type=string"), true); @@ -809,7 +773,7 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest assertAcked(client().admin().indices().prepareOpen("_all").get()); try { verify(client().admin().indices().prepareUpdateSettings("barbaz").setSettings(ImmutableSettings.builder().put("e", "f")), false); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("Can't update non dynamic settings[[index.e]] for open indices[[barbaz]]")); } verify(client().admin().indices().prepareUpdateSettings("baz*").setSettings(ImmutableSettings.builder().put("a", "b")), true); @@ -867,10 +831,6 @@ public class IndicesOptionsIntegrationTests extends ElasticsearchIntegrationTest return client().admin().indices().prepareTypesExists(indices).setTypes("dummy"); } - private static DeleteByQueryRequestBuilder deleteByQuery(String... indices) { - return client().prepareDeleteByQuery(indices).setQuery(boolQuery().mustNot(matchAllQuery())); - } - private static PercolateRequestBuilder percolate(String... 
indices) { return client().preparePercolate().setIndices(indices) .setSource(new PercolateSourceBuilder().setDoc(docBuilder().setDoc("k", "v"))) diff --git a/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java b/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java index 0488c02041e..415d806278f 100644 --- a/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java +++ b/src/test/java/org/elasticsearch/indices/IndicesServiceTest.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices; import org.apache.lucene.store.LockObtainFailedException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -74,7 +73,7 @@ public class IndicesServiceTest extends ElasticsearchSingleNodeTest { try { indicesService.deleteIndexStore("boom", firstMetaData, clusterService.state()); fail(); - } catch (ElasticsearchIllegalStateException ex) { + } catch (IllegalStateException ex) { // all good } @@ -101,7 +100,7 @@ public class IndicesServiceTest extends ElasticsearchSingleNodeTest { try { indicesService.deleteIndexStore("boom", secondMetaData, clusterService.state()); fail(); - } catch (ElasticsearchIllegalStateException ex) { + } catch (IllegalStateException ex) { // all good } @@ -111,7 +110,7 @@ public class IndicesServiceTest extends ElasticsearchSingleNodeTest { try { indicesService.deleteIndexStore("boom", firstMetaData, clusterService.state()); fail(); - } catch (ElasticsearchIllegalStateException ex) { + } catch (IllegalStateException ex) { // all good } assertAcked(client().admin().indices().prepareOpen("test")); diff --git a/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java b/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java index 67548a83e3b..bf169d254e6 100644 --- a/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java +++ b/src/test/java/org/elasticsearch/indices/analyze/AnalyzeActionTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices.analyze; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest; import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequestBuilder; @@ -53,36 +52,40 @@ public class AnalyzeActionTests extends ElasticsearchIntegrationTest { assertThat(token.getTerm(), equalTo("this")); assertThat(token.getStartOffset(), equalTo(0)); assertThat(token.getEndOffset(), equalTo(4)); + assertThat(token.getPosition(), equalTo(0)); token = analyzeResponse.getTokens().get(1); assertThat(token.getTerm(), equalTo("is")); assertThat(token.getStartOffset(), equalTo(5)); assertThat(token.getEndOffset(), equalTo(7)); + assertThat(token.getPosition(), equalTo(1)); token = analyzeResponse.getTokens().get(2); assertThat(token.getTerm(), equalTo("a")); assertThat(token.getStartOffset(), equalTo(8)); assertThat(token.getEndOffset(), equalTo(9)); + assertThat(token.getPosition(), equalTo(2)); token = analyzeResponse.getTokens().get(3); assertThat(token.getTerm(), equalTo("test")); assertThat(token.getStartOffset(), equalTo(10)); assertThat(token.getEndOffset(), equalTo(14)); + assertThat(token.getPosition(), equalTo(3)); } } @Test - public void analyzeNumericField() throws ElasticsearchException, IOException { + public void analyzeNumericField() throws IOException 
{ assertAcked(prepareCreate("test").addAlias(new Alias("alias")).addMapping("test", "long", "type=long", "double", "type=double")); ensureGreen("test"); try { client().admin().indices().prepareAnalyze(indexOrAlias(), "123").setField("long").get(); fail("shouldn't get here"); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { //all good } try { client().admin().indices().prepareAnalyze(indexOrAlias(), "123.0").setField("double").get(); fail("shouldn't get here"); - } catch (ElasticsearchIllegalArgumentException ex) { + } catch (IllegalArgumentException ex) { //all good } } @@ -107,6 +110,14 @@ public class AnalyzeActionTests extends ElasticsearchIntegrationTest { assertThat(token.getTerm(), equalTo("a")); token = analyzeResponse.getTokens().get(3); assertThat(token.getTerm(), equalTo("tset")); + + analyzeResponse = client().admin().indices().prepareAnalyze("of course").setTokenizer("standard").setTokenFilters("stop").get(); + assertThat(analyzeResponse.getTokens().size(), equalTo(1)); + assertThat(analyzeResponse.getTokens().get(0).getTerm(), equalTo("course")); + assertThat(analyzeResponse.getTokens().get(0).getPosition(), equalTo(1)); + assertThat(analyzeResponse.getTokens().get(0).getStartOffset(), equalTo(3)); + assertThat(analyzeResponse.getTokens().get(0).getEndOffset(), equalTo(9)); + } @Test @@ -223,7 +234,7 @@ public class AnalyzeActionTests extends ElasticsearchIntegrationTest { RestAnalyzeAction.buildFromContent(invalidContent, analyzeRequest); fail("shouldn't get here"); } catch (Exception e) { - assertThat(e, instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e, instanceOf(IllegalArgumentException.class)); assertThat(e.getMessage(), equalTo("Failed to parse request body")); } } @@ -242,7 +253,7 @@ public class AnalyzeActionTests extends ElasticsearchIntegrationTest { RestAnalyzeAction.buildFromContent(invalidContent, analyzeRequest); fail("shouldn't get here"); } catch (Exception e) { - assertThat(e, instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e, instanceOf(IllegalArgumentException.class)); assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]")); } } diff --git a/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java b/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java index 36bdfd15af6..9013156a59b 100644 --- a/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java +++ b/src/test/java/org/elasticsearch/indices/cache/query/IndicesQueryCacheTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices.cache.query; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchType; +import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; diff --git a/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsTests.java b/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsTests.java new file mode 100644 index 00000000000..b0a549f3cc7 --- /dev/null +++ b/src/test/java/org/elasticsearch/indices/exists/indices/IndicesExistsTests.java @@ -0,0 +1,80 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices.exists.indices; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.hamcrest.Matchers.equalTo; + +public class IndicesExistsTests extends ElasticsearchIntegrationTest { + + @Test + // The indices exists API never throws IndexMissingException; the indices options control its behaviour (returning true or false) + public void testIndicesExists() throws Exception { + assertThat(client().admin().indices().prepareExists("foo").get().isExists(), equalTo(false)); + assertThat(client().admin().indices().prepareExists("foo").setIndicesOptions(IndicesOptions.lenientExpandOpen()).get().isExists(), equalTo(true)); + assertThat(client().admin().indices().prepareExists("foo*").get().isExists(), equalTo(false)); + assertThat(client().admin().indices().prepareExists("foo*").setIndicesOptions(IndicesOptions.fromOptions(false, true, true, false)).get().isExists(), equalTo(true)); + assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(false)); + + createIndex("foo", "foobar", "bar", "barbaz"); + ensureYellow(); + + assertThat(client().admin().indices().prepareExists("foo*").get().isExists(), equalTo(true)); + assertThat(client().admin().indices().prepareExists("foobar").get().isExists(), equalTo(true)); + assertThat(client().admin().indices().prepareExists("bar*").get().isExists(), equalTo(true)); + assertThat(client().admin().indices().prepareExists("bar").get().isExists(), equalTo(true)); + assertThat(client().admin().indices().prepareExists("_all").get().isExists(), equalTo(true)); + } + + @Test + public void testIndicesExistsWithBlocks() { + createIndex("ro"); + ensureYellow(); + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("ro", blockSetting); + assertThat(client().admin().indices().prepareExists("ro").execute().actionGet().isExists(), equalTo(true)); + } finally { + disableIndexBlock("ro", blockSetting); + } + } + + // Request is blocked + try { + enableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA); + assertThat(client().admin().indices().prepareExists("ro").execute().actionGet().isExists(), equalTo(true)); + fail("Exists should fail when " + IndexMetaData.SETTING_BLOCKS_METADATA + " is true"); + } catch (ClusterBlockException e) { + // Ok, a ClusterBlockException is expected + } finally { + disableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA); + } + } +}
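Aside: the testIndicesExists assertions above are driven entirely by how the request's IndicesOptions resolve missing names and unmatched wildcards; no exception is thrown either way. A toy model of that resolution, with invented names (the real logic lives behind IndicesOptions and the cluster metadata, and the single lenient flag here deliberately conflates ignoreUnavailable and allowNoIndices):

```java
import java.util.Arrays;
import java.util.List;

// Toy model of how the exists API's answer depends on expansion options.
// Class and method names are illustrative stand-ins, not Elasticsearch API.
public class IndicesExistsModel {

    static boolean exists(List<String> existing, String expression, boolean lenient) {
        if (expression.equals("_all") || expression.endsWith("*")) {
            String prefix = expression.equals("_all") ? "" : expression.substring(0, expression.length() - 1);
            boolean matched = existing.stream().anyMatch(name -> name.startsWith(prefix));
            // An unmatched wildcard only counts as "exists" under lenient options.
            return matched || lenient;
        }
        // A concrete missing index counts as "not exists" unless the options are lenient.
        return existing.contains(expression) || lenient;
    }

    public static void main(String[] args) {
        List<String> none = Arrays.asList();
        List<String> created = Arrays.asList("foo", "foobar", "bar", "barbaz");
        System.out.println(exists(none, "foo", false));     // false, mirrors prepareExists("foo")
        System.out.println(exists(none, "foo", true));      // true, mirrors lenientExpandOpen()
        System.out.println(exists(none, "_all", false));    // false while no index exists
        System.out.println(exists(created, "foo*", false)); // true once the indices are created
    }
}
```

The same expressions flip from false to true once createIndex has run, which is exactly the table the test walks through.

diff --git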
a/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java b/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java index 778723be68c..f72609298e4 100644 --- a/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java +++ b/src/test/java/org/elasticsearch/indices/exists/types/TypesExistsTests.java @@ -21,11 +21,18 @@ package org.elasticsearch.indices.exists.types; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; +import java.io.IOException; +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.hamcrest.Matchers.equalTo; public class TypesExistsTests extends ElasticsearchIntegrationTest { @@ -69,4 +76,27 @@ public class TypesExistsTests extends ElasticsearchIntegrationTest { assertThat(response.isExists(), equalTo(false)); } + @Test + public void testTypesExistsWithBlocks() throws IOException { + assertAcked(prepareCreate("ro").addMapping("type1", jsonBuilder().startObject().startObject("type1").endObject().endObject())); + ensureGreen("ro"); + + // Request is not blocked + for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("ro", block); + assertThat(client().admin().indices().prepareTypesExists("ro").setTypes("type1").execute().actionGet().isExists(), equalTo(true)); + } finally { + disableIndexBlock("ro", block); + } + } + + // Request is blocked + try { + enableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().indices().prepareTypesExists("ro").setTypes("type1")); + } finally { + disableIndexBlock("ro", IndexMetaData.SETTING_BLOCKS_METADATA); + } + } } diff --git a/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java b/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java index 28bcd9f63d9..ca0b1a52029 100644 --- a/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java +++ b/src/test/java/org/elasticsearch/indices/leaks/IndicesLeaksTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.indices.leaks; -import org.apache.lucene.util.LuceneTestCase.BadApple; import org.elasticsearch.common.inject.Injector; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.index.mapper.DocumentMapper; @@ -28,7 +27,6 @@ import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; -import org.elasticsearch.test.store.MockDirectoryHelper; import org.junit.Test; import java.lang.ref.WeakReference; @@ -92,7 +90,6 @@ public class IndicesLeaksTests extends ElasticsearchIntegrationTest { shardInjector = null; cluster().wipeIndices("test"); - MockDirectoryHelper.wrappers.clear(); // we need to clear this to allow the objects to recycle for (int i = 0; i < 100; i++) 
{ System.gc(); diff --git a/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java b/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java index 50bbd8e9e2d..28bcde323d2 100644 --- a/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java +++ b/src/test/java/org/elasticsearch/indices/mapping/ConcurrentDynamicTemplateTests.java @@ -46,7 +46,6 @@ public class ConcurrentDynamicTemplateTests extends ElasticsearchIntegrationTest private final String mappingType = "test-mapping"; @Test // see #3544 - @AwaitsFix(bugUrl = "adrien is looking into this") public void testConcurrentDynamicMapping() throws Exception { final String fieldName = "field"; final String mapping = "{ \"" + mappingType + "\": {" + diff --git a/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java b/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java index e718c8b318d..119157bcfc1 100644 --- a/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java +++ b/src/test/java/org/elasticsearch/indices/mapping/SimpleGetFieldMappingsTests.java @@ -21,16 +21,21 @@ package org.elasticsearch.indices.mapping; import com.google.common.collect.Maps; import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse; -import org.elasticsearch.common.xcontent.*; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.hamcrest.Matchers; import org.junit.Test; import java.io.IOException; +import java.util.Arrays; import java.util.Map; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.hamcrest.Matchers.*; public class SimpleGetFieldMappingsTests extends ElasticsearchIntegrationTest { @@ -174,4 +179,29 @@ public class SimpleGetFieldMappingsTests extends ElasticsearchIntegrationTest { assertThat(responseStrings, not(equalTo(prettyJsonBuilder.string()))); } + + @Test + public void testGetFieldMappingsWithBlocks() throws Exception { + assertAcked(prepareCreate("test") + .addMapping("typeA", getMappingForType("typeA")) + .addMapping("typeB", getMappingForType("typeB"))); + ensureYellow(); + + for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("test", block); + GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings("test").setTypes("typeA").setFields("field1", "obj.subfield").get(); + assertThat(response.fieldMappings("test", "typeA", "field1").fullName(), equalTo("field1")); + } finally { + disableIndexBlock("test", block); + } + } + + try { + enableIndexBlock("test", SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().indices().prepareGetFieldMappings("test"), INDEX_METADATA_BLOCK); + } finally { + disableIndexBlock("test", SETTING_BLOCKS_METADATA); + } + } } diff --git a/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java b/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java index adfd37a0c15..4757aea8600 100644 ---
a/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java +++ b/src/test/java/org/elasticsearch/indices/mapping/SimpleGetMappingsTests.java @@ -28,8 +28,11 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import org.junit.Test; import java.io.IOException; +import java.util.Arrays; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -143,4 +146,30 @@ public class SimpleGetMappingsTests extends ElasticsearchIntegrationTest { assertThat(response.mappings().get("indexb").get("Btype"), notNullValue()); } + @Test + public void testGetMappingsWithBlocks() throws IOException { + client().admin().indices().prepareCreate("test") + .addMapping("typeA", getMappingForType("typeA")) + .addMapping("typeB", getMappingForType("typeB")) + .execute().actionGet(); + ensureGreen(); + + for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("test", block); + GetMappingsResponse response = client().admin().indices().prepareGetMappings().execute().actionGet(); + assertThat(response.mappings().size(), equalTo(1)); + assertThat(response.mappings().get("test").size(), equalTo(2)); + } finally { + disableIndexBlock("test", block); + } + } + + try { + enableIndexBlock("test", SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().indices().prepareGetMappings(), INDEX_METADATA_BLOCK); + } finally { + disableIndexBlock("test", SETTING_BLOCKS_METADATA); + } + } } diff --git a/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java b/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationTests.java similarity index 92% rename from src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java rename to src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationTests.java index d225c936121..11638c74660 100644 --- a/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingTests.java +++ b/src/test/java/org/elasticsearch/indices/mapping/UpdateMappingIntegrationTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices.mapping; import com.google.common.collect.Lists; +import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.refresh.RefreshResponse; @@ -28,6 +29,7 @@ import org.elasticsearch.action.count.CountResponse; import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.common.Priority; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -41,19 +43,20 @@ import org.hamcrest.Matchers; import org.junit.Test; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.atomic.AtomicBoolean; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static 
org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; import static org.hamcrest.Matchers.*; @ClusterScope(randomDynamicTemplates = false) -public class UpdateMappingTests extends ElasticsearchIntegrationTest { +public class UpdateMappingIntegrationTests extends ElasticsearchIntegrationTest { @Test public void dynamicUpdates() throws Exception { @@ -212,13 +215,13 @@ public class UpdateMappingTests extends ElasticsearchIntegrationTest { @SuppressWarnings("unchecked") @Test - public void updateIncludeExclude() throws Exception { - assertAcked(prepareCreate("test").addMapping("type", - jsonBuilder().startObject().startObject("type").startObject("properties") - .startObject("normal").field("type", "long").endObject() - .startObject("exclude").field("type", "long").endObject() - .startObject("include").field("type", "long").endObject() - .endObject().endObject().endObject())); + public void updateIncludeExcludeBackcompat() throws Exception { + assertAcked(prepareCreate("test").setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id) + .addMapping("type", jsonBuilder().startObject().startObject("type").startObject("properties") + .startObject("normal").field("type", "long").endObject() + .startObject("exclude").field("type", "long").endObject() + .startObject("include").field("type", "long").endObject() + .endObject().endObject().endObject())); ensureGreen(); // make sure that replicas are initialized so the refresh command will work on them too logger.info("Index doc"); @@ -228,7 +231,6 @@ public class UpdateMappingTests extends ElasticsearchIntegrationTest { ); refresh(); // commit it for later testing.
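Aside: the *WithBlocks tests added across these files (field mappings, mappings, settings, exists, open/close) all share one shape: enable an index block, run the request and its assertions, and always lift the block in a finally clause so a failing assertion cannot leak a block into later tests. A generic, self-contained sketch of that shape follows; Block and BlockToggle are invented stand-ins for the enableIndexBlock/disableIndexBlock helpers the tests inherit:

```java
import java.util.Arrays;
import java.util.List;

// Generic shape of the "run a request under each index block" pattern.
// Block and BlockToggle are illustrative stand-ins, not the real test API.
public class IndexBlockPattern {

    enum Block { BLOCKS_READ, BLOCKS_WRITE, READ_ONLY, BLOCKS_METADATA }

    interface BlockToggle {
        void enable(String index, Block block);
        void disable(String index, Block block);
    }

    static void runUnderEachBlock(BlockToggle toggle, String index, List<Block> blocks, Runnable request) {
        for (Block block : blocks) {
            try {
                toggle.enable(index, block);
                request.run(); // the request (and its assertions) under test
            } finally {
                // Always lift the block, even when the assertion fails,
                // so later tests start from an unblocked index.
                toggle.disable(index, block);
            }
        }
    }

    public static void main(String[] args) {
        BlockToggle logging = new BlockToggle() {
            @Override public void enable(String index, Block block) { System.out.println("enable " + block + " on " + index); }
            @Override public void disable(String index, Block block) { System.out.println("disable " + block + " on " + index); }
        };
        runUnderEachBlock(logging, "test", Arrays.asList(Block.BLOCKS_READ, Block.BLOCKS_WRITE),
                () -> System.out.println("  request runs while the block is active"));
    }
}
```

Running it prints the enable/disable bracketing around each request, which is the invariant the finally clause protects.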
- logger.info("Adding exclude settings"); PutMappingResponse putResponse = client().admin().indices().preparePutMapping("test").setType("type").setSource( JsonXContent.contentBuilder().startObject().startObject("type") @@ -258,7 +260,6 @@ public class UpdateMappingTests extends ElasticsearchIntegrationTest { assertThat(getResponse.getSource(), not(hasKey("exclude"))); assertThat(getResponse.getSource(), hasKey("include")); - logger.info("Changing mapping to includes"); putResponse = client().admin().indices().preparePutMapping("test").setType("type").setSource( JsonXContent.contentBuilder().startObject().startObject("type") @@ -277,7 +278,6 @@ public class UpdateMappingTests extends ElasticsearchIntegrationTest { assertThat((Map) typeMapping.getSourceAsMap().get("_source"), hasKey("excludes")); assertThat((ArrayList) ((Map) typeMapping.getSourceAsMap().get("_source")).get("excludes"), emptyIterable()); - logger.info("Indexing doc yet again"); index("test", "type", "1", JsonXContent.contentBuilder().startObject() .field("normal", 3).field("exclude", 3).field("include", 3) @@ -289,7 +289,6 @@ public class UpdateMappingTests extends ElasticsearchIntegrationTest { assertThat(getResponse.getSource(), not(hasKey("exclude"))); assertThat(getResponse.getSource(), hasKey("include")); - logger.info("Adding excludes, but keep includes"); putResponse = client().admin().indices().preparePutMapping("test").setType("type").setSource( JsonXContent.contentBuilder().startObject().startObject("type") @@ -307,8 +306,6 @@ public class UpdateMappingTests extends ElasticsearchIntegrationTest { assertThat((Map) typeMapping.getSourceAsMap().get("_source"), hasKey("excludes")); ArrayList excludes = (ArrayList) ((Map) typeMapping.getSourceAsMap().get("_source")).get("excludes"); assertThat(excludes, contains("*.excludes")); - - } @SuppressWarnings("unchecked") @@ -440,4 +437,28 @@ public class UpdateMappingTests extends ElasticsearchIntegrationTest { } } + + @Test + public void testPutMappingsWithBlocks() throws Exception { + createIndex("test"); + ensureGreen(); + + for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + enableIndexBlock("test", block); + assertAcked(client().admin().indices().preparePutMapping("test").setType("doc").setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}")); + } finally { + disableIndexBlock("test", block); + } + } + + for (String block : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test", block); + assertBlocked(client().admin().indices().preparePutMapping("test").setType("doc").setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}")); + } finally { + disableIndexBlock("test", block); + } + } + } } diff --git a/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java b/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java index 311449aaece..f09562f690e 100644 --- a/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java +++ b/src/test/java/org/elasticsearch/indices/memory/breaker/CircuitBreakerServiceTests.java @@ -241,8 +241,8 @@ public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest { fail("should have thrown an exception"); } catch (Exception e) { String errMsg = "[fielddata] Data too large, data for [test] would be larger than limit of [10/10b]"; - assertThat("Exception: " + ExceptionsHelper.unwrapCause(e) + " should contain a CircuitBreakingException", - 
ExceptionsHelper.unwrapCause(e).getMessage().contains(errMsg), equalTo(true)); + assertThat("Exception: " + e.toString() + " should contain a CircuitBreakingException", + e.toString().contains(errMsg), equalTo(true)); } assertFailures(client.prepareSearch("cb-test").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC), @@ -263,8 +263,8 @@ public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest { fail("should have thrown an exception"); } catch (Exception e) { String errMsg = "[parent] Data too large, data for [test] would be larger than limit of [15/15b]"; - assertThat("Exception: " + ExceptionsHelper.unwrapCause(e) + " should contain a CircuitBreakingException", - ExceptionsHelper.unwrapCause(e).getMessage().contains(errMsg), equalTo(true)); + assertThat("Exception: " + e.toString() + " should contain a CircuitBreakingException", + e.toString().contains(errMsg), equalTo(true)); } } @@ -297,8 +297,8 @@ public class CircuitBreakerServiceTests extends ElasticsearchIntegrationTest { fail("aggregation should have tripped the breaker"); } catch (Exception e) { String errMsg = "CircuitBreakingException[[request] Data too large, data for [] would be larger than limit of [10/10b]]"; - assertThat("Exception: " + ExceptionsHelper.unwrapCause(e) + " should contain a CircuitBreakingException", - ExceptionsHelper.unwrapCause(e).getMessage().contains(errMsg), equalTo(true)); + assertThat("Exception: " + e.toString() + " should contain a CircuitBreakingException", + e.toString().contains(errMsg), equalTo(true)); } } diff --git a/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryTests.java b/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryTests.java index 113ac6d80b7..842a6c533dc 100644 --- a/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryTests.java +++ b/src/test/java/org/elasticsearch/indices/recovery/IndexRecoveryTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices.recovery; -import com.carrotsearch.randomizedtesting.LifecycleScope; import com.google.common.util.concurrent.ListenableFuture; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; @@ -52,7 +51,7 @@ import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.test.store.MockDirectoryHelper; +import org.elasticsearch.test.store.MockFSDirectoryService; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.*; import org.junit.Test; @@ -522,7 +521,7 @@ public class IndexRecoveryTests extends ElasticsearchIntegrationTest { .put(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT, "1s") .put("cluster.routing.schedule", "100ms") // aggressive reroute post shard failures .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, MockTransportService.class.getName()) - .put(MockDirectoryHelper.RANDOM_PREVENT_DOUBLE_WRITE, false) // restarted recoveries will delete temp files and write them again + .put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE, false) // restarted recoveries will delete temp files and write them again .build(); // start a master node internalCluster().startNode(nodeSettings); diff --git a/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java
b/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java index e1b5c04368b..cc8e7c5fae8 100644 --- a/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java +++ b/src/test/java/org/elasticsearch/indices/recovery/RecoveryStateTest.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.indices.recovery; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; @@ -344,7 +343,7 @@ public class RecoveryStateTest extends ElasticsearchTestCase { state.setStage(stage); } fail("succeeded in performing the illegal sequence [" + Strings.arrayToCommaDelimitedString(stages) + "]"); - } catch (ElasticsearchIllegalStateException e) { + } catch (IllegalStateException e) { // cool } diff --git a/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksTests.java b/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksTests.java new file mode 100644 index 00000000000..7ecf5eb465d --- /dev/null +++ b/src/test/java/org/elasticsearch/indices/settings/GetSettingsBlocksTests.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.indices.settings; + +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + +public class GetSettingsBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testGetSettingsWithBlocks() throws Exception { + assertAcked(prepareCreate("test") + .setSettings(ImmutableSettings.settingsBuilder() + .put("index.refresh_interval", -1) + .put("index.merge.policy.expunge_deletes_allowed", "30") + .put("index.mapper.dynamic", false))); + + for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("test", block); + GetSettingsResponse response = client().admin().indices().prepareGetSettings("test").get(); + assertThat(response.getIndexToSettings().size(), greaterThanOrEqualTo(1)); + assertThat(response.getSetting("test", "index.refresh_interval"), equalTo("-1")); + assertThat(response.getSetting("test", "index.merge.policy.expunge_deletes_allowed"), equalTo("30")); + assertThat(response.getSetting("test", "index.mapper.dynamic"), equalTo("false")); + } finally { + disableIndexBlock("test", block); + } + } + + try { + enableIndexBlock("test", SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().indices().prepareGetSettings("test")); + } finally { + disableIndexBlock("test", SETTING_BLOCKS_METADATA); + } + } +} diff --git a/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java b/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java index 74776657270..a12e35a92ee 100644 --- a/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java +++ b/src/test/java/org/elasticsearch/indices/settings/UpdateNumberOfReplicasTests.java @@ -22,7 +22,9 @@ package org.elasticsearch.indices.settings; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus; import org.elasticsearch.action.count.CountResponse; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Priority; +import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -35,6 +37,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcke import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; +@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2) public class UpdateNumberOfReplicasTests extends ElasticsearchIntegrationTest { @Override @@ -263,4 +266,20 @@ public class UpdateNumberOfReplicasTests extends ElasticsearchIntegrationTest { assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(3)); assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 4)); } + + @Test + public void testUpdateWithInvalidNumberOfReplicas() { + 
createIndex("test"); + try { + client().admin().indices().prepareUpdateSettings("test") + .setSettings(ImmutableSettings.settingsBuilder() + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(-10, -1)) + ) + .execute().actionGet(); + fail("should have thrown an exception about the replica shard count"); + } catch (IllegalArgumentException e) { + assertThat("message contains error about shard count: " + e.getMessage(), + e.getMessage().contains("the value of the setting index.number_of_replicas must be a non negative integer"), equalTo(true)); + } + } } diff --git a/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java b/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java index 38a7a9d09ae..fad19ed7b28 100644 --- a/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java +++ b/src/test/java/org/elasticsearch/indices/settings/UpdateSettingsTests.java @@ -23,8 +23,6 @@ import org.apache.log4j.AppenderSkeleton; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.apache.log4j.spi.LoggingEvent; -import org.apache.lucene.util.LuceneTestCase.Slow; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -32,17 +30,20 @@ import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.ImmutableSettings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.merge.policy.TieredMergePolicyProvider; import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider; import org.elasticsearch.index.merge.scheduler.MergeSchedulerModule; +import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.index.store.Store; -import org.elasticsearch.index.store.support.AbstractIndexStore; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertThrows; +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.nullValue; @@ -59,7 +60,7 @@ public class UpdateSettingsTests extends ElasticsearchIntegrationTest { ) .execute().actionGet(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { // all is well } @@ -134,7 +135,7 @@ public class UpdateSettingsTests extends ElasticsearchIntegrationTest { // No throttling at first, only 1 non-replicated shard, force lots of merging: assertAcked(prepareCreate("test") .setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "none") + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "none") .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, "2") @@ -172,13 +173,13 @@ public class UpdateSettingsTests extends 
ElasticsearchIntegrationTest { .indices() .prepareUpdateSettings("test") .setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "merge") - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, "1mb")) + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "merge") + .put(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, "1mb")) .get(); // Make sure setting says it is in fact changed: GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get(); - assertThat(getSettingsResponse.getSetting("test", AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE), equalTo("merge")); + assertThat(getSettingsResponse.getSetting("test", IndexStore.INDEX_STORE_THROTTLE_TYPE), equalTo("merge")); // Also make sure we see throttling kicking in: boolean done = false; @@ -212,7 +213,7 @@ public class UpdateSettingsTests extends ElasticsearchIntegrationTest { .indices() .prepareUpdateSettings("test") .setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "none")) + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "none")) .get(); // Optimize does a waitForMerges, which we must do to make sure all in-flight (throttled) merges finish: @@ -384,4 +385,31 @@ public class UpdateSettingsTests extends ElasticsearchIntegrationTest { rootLogger.setLevel(savedLevel); } } + + @Test + public void testUpdateSettingsWithBlocks() { + createIndex("test"); + ensureGreen("test"); + + Settings.Builder builder = ImmutableSettings.builder().put("index.refresh_interval", -1); + + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + enableIndexBlock("test", blockSetting); + assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(builder)); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + // Updating settings is blocked + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test", blockSetting); + assertBlocked(client().admin().indices().prepareUpdateSettings("test").setSettings(builder)); + } finally { + disableIndexBlock("test", blockSetting); + } + } + } } diff --git a/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java b/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java index 3d9e676d545..b2308b15a8c 100644 --- a/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java +++ b/src/test/java/org/elasticsearch/indices/state/CloseIndexDisableCloseAllTests.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.indices.state; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.admin.indices.close.CloseIndexResponse; @@ -54,35 +53,35 @@ public class CloseIndexDisableCloseAllTests extends ElasticsearchIntegrationTest try { client().admin().indices().prepareClose("_all").execute().actionGet(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } // Close all wildcard try { client().admin().indices().prepareClose("*").execute().actionGet(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } // Close all wildcard try { client().admin().indices().prepareClose("test*").execute().actionGet(); fail(); - } catch
(ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } // Close all wildcard try { client().admin().indices().prepareClose("*", "-test1").execute().actionGet(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } // Close all wildcard try { client().admin().indices().prepareClose("*", "-test1", "+test1").execute().actionGet(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test3", "test2").execute().actionGet(); diff --git a/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java b/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java index 8b7d7932298..4ba97227750 100644 --- a/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java +++ b/src/test/java/org/elasticsearch/indices/state/OpenCloseIndexTests.java @@ -30,21 +30,18 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.indices.IndexMissingException; import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.elasticsearch.test.store.MockFSDirectoryService; import org.junit.Test; import java.io.IOException; +import java.util.Arrays; import java.util.concurrent.ExecutionException; -import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.notNullValue; @@ -338,5 +335,58 @@ public class OpenCloseIndexTests extends ElasticsearchIntegrationTest { assertHitCount(searchResponse, docs); } + @Test + public void testOpenCloseIndexWithBlocks() { + createIndex("test"); + ensureGreen("test"); + int docs = between(10, 100); + for (int i = 0; i < docs ; i++) { + client().prepareIndex("test", "type", "" + i).setSource("test", "init").execute().actionGet(); + } + + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + enableIndexBlock("test", blockSetting); + + // Closing an index is not blocked + CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").execute().actionGet(); + assertAcked(closeIndexResponse); + assertIndexIsClosed("test"); + + // Opening an index is not blocked + OpenIndexResponse openIndexResponse = client().admin().indices().prepareOpen("test").execute().actionGet(); + assertAcked(openIndexResponse); + assertIndexIsOpened("test"); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + // Closing an index is blocked + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test", blockSetting); + assertBlocked(client().admin().indices().prepareClose("test")); + 
assertIndexIsOpened("test"); + } finally { + disableIndexBlock("test", blockSetting); + } + } + + CloseIndexResponse closeIndexResponse = client().admin().indices().prepareClose("test").execute().actionGet(); + assertAcked(closeIndexResponse); + assertIndexIsClosed("test"); + + // Opening an index is blocked + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + enableIndexBlock("test", blockSetting); + assertBlocked(client().admin().indices().prepareOpen("test")); + assertIndexIsClosed("test"); + } finally { + disableIndexBlock("test", blockSetting); + } + } + } } \ No newline at end of file diff --git a/src/test/java/org/elasticsearch/indices/state/RareClusterStateTests.java b/src/test/java/org/elasticsearch/indices/state/RareClusterStateTests.java index 2875a90824d..a46a273b2ba 100644 --- a/src/test/java/org/elasticsearch/indices/state/RareClusterStateTests.java +++ b/src/test/java/org/elasticsearch/indices/state/RareClusterStateTests.java @@ -20,6 +20,10 @@ package org.elasticsearch.indices.state; import com.google.common.collect.ImmutableMap; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; +import org.elasticsearch.action.index.IndexResponse; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.DiskUsage; @@ -27,6 +31,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; @@ -35,6 +40,10 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.DiscoverySettings; import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.mapper.DocumentMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.disruption.BlockClusterStateProcessing; import org.elasticsearch.test.junit.annotations.TestLogging; @@ -42,10 +51,16 @@ import org.junit.Test; import java.util.Arrays; import java.util.HashSet; +import java.util.List; import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.instanceOf; /** */ @@ -115,4 +130,118 @@ public class RareClusterStateTests extends ElasticsearchIntegrationTest { assertHitCount(client().prepareSearch("test").get(), 0); } + public void testDelayedMappingPropagationOnReplica() throws Exception { + // Here we want to test that everything goes well if the mappings that + // are needed for a document are not available on the replica at the + // time of indexing it + final List<String> nodeNames =
internalCluster().startNodesAsync(2).get(); + assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes("2").get().isTimedOut()); + + final String master = internalCluster().getMasterName(); + assertThat(nodeNames, hasItem(master)); + String otherNode = null; + for (String node : nodeNames) { + if (node.equals(master) == false) { + otherNode = node; + break; + } + } + assertNotNull(otherNode); + + // Force allocation of the primary on the master node by first only allocating on the master + // and then allowing all nodes so that the replica gets allocated on the other node + assertAcked(prepareCreate("index").setSettings(ImmutableSettings.builder() + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) + .put("index.routing.allocation.include._name", master)).get()); + assertAcked(client().admin().indices().prepareUpdateSettings("index").setSettings(ImmutableSettings.builder() + .put("index.routing.allocation.include._name", "")).get()); + ensureGreen(); + + // Check routing tables + ClusterState state = client().admin().cluster().prepareState().get().getState(); + assertEquals(master, state.nodes().masterNode().name()); + List<ShardRouting> shards = state.routingTable().allShards("index"); + assertThat(shards, hasSize(2)); + for (ShardRouting shard : shards) { + if (shard.primary()) { + // primary must be on the master + assertEquals(state.nodes().masterNodeId(), shard.currentNodeId()); + } else { + assertTrue(shard.active()); + } + } + + // Block cluster state processing on the replica + BlockClusterStateProcessing disruption = new BlockClusterStateProcessing(otherNode, getRandom()); + internalCluster().setDisruptionScheme(disruption); + disruption.startDisrupting(); + final AtomicReference<Object> putMappingResponse = new AtomicReference<>(); + client().admin().indices().preparePutMapping("index").setType("type").setSource("field", "type=long").execute(new ActionListener<PutMappingResponse>() { + @Override + public void onResponse(PutMappingResponse response) { + putMappingResponse.set(response); + } + @Override + public void onFailure(Throwable e) { + putMappingResponse.set(e); + } + }); + // Wait for mappings to be available on master + assertBusy(new Runnable() { + @Override + public void run() { + final IndicesService indicesService = internalCluster().getInstance(IndicesService.class, master); + final IndexService indexService = indicesService.indexServiceSafe("index"); + assertNotNull(indexService); + final MapperService mapperService = indexService.mapperService(); + DocumentMapper mapper = mapperService.documentMapper("type"); + assertNotNull(mapper); + assertNotNull(mapper.mappers().getMapper("field")); + } + }); + + final AtomicReference<Object> docIndexResponse = new AtomicReference<>(); + client().prepareIndex("index", "type", "1").setSource("field", 42).execute(new ActionListener<IndexResponse>() { + @Override + public void onResponse(IndexResponse response) { + docIndexResponse.set(response); + } + @Override + public void onFailure(Throwable e) { + docIndexResponse.set(e); + } + }); + + // Wait for document to be indexed on primary + assertBusy(new Runnable() { + @Override + public void run() { + assertTrue(client().prepareGet("index", "type", "1").setPreference("_primary").get().isExists()); + } + }); + + // The mappings have not been propagated to the replica yet, as a consequence the document cannot be indexed there + // We wait on purpose to make sure that the document is not indexed because the shard operation is stalled + // and not just because it takes time to replicate the
indexing request to the replica + Thread.sleep(100); + assertThat(putMappingResponse.get(), equalTo(null)); + assertThat(docIndexResponse.get(), equalTo(null)); + + // Now make sure the indexing request finishes successfully + disruption.stopDisrupting(); + assertBusy(new Runnable() { + @Override + public void run() { + assertThat(putMappingResponse.get(), instanceOf(PutMappingResponse.class)); + PutMappingResponse resp = (PutMappingResponse) putMappingResponse.get(); + assertTrue(resp.isAcknowledged()); + assertThat(docIndexResponse.get(), instanceOf(IndexResponse.class)); + IndexResponse docResp = (IndexResponse) docIndexResponse.get(); + assertEquals(Arrays.toString(docResp.getShardInfo().getFailures()), + 2, docResp.getShardInfo().getTotal()); // both shards should have succeeded + } + }); + } + } diff --git a/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java b/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java index 962a7eb028d..bb88f994bae 100644 --- a/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java +++ b/src/test/java/org/elasticsearch/indices/state/SimpleIndexStateTests.java @@ -44,6 +44,7 @@ import static org.hamcrest.Matchers.nullValue; /** * */ +@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2) public class SimpleIndexStateTests extends ElasticsearchIntegrationTest { private final ESLogger logger = Loggers.getLogger(SimpleIndexStateTests.class); diff --git a/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java b/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java index df7ba00d390..d992991fa18 100644 --- a/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java +++ b/src/test/java/org/elasticsearch/indices/stats/IndexStatsTests.java @@ -19,12 +19,13 @@ package org.elasticsearch.indices.stats; -import org.apache.lucene.util.Version; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; +import org.apache.lucene.util.Version; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags.Flag; +import org.elasticsearch.action.admin.indices.stats.IndexStats; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.admin.indices.stats.ShardStats; @@ -39,12 +40,14 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.cache.filter.FilterCacheModule; +import org.elasticsearch.index.cache.filter.FilterCacheStats; import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; +import org.elasticsearch.index.cache.filter.index.IndexFilterCache; import org.elasticsearch.index.merge.policy.TieredMergePolicyProvider; import org.elasticsearch.index.merge.scheduler.ConcurrentMergeSchedulerProvider; import org.elasticsearch.index.query.FilterBuilders; -import org.elasticsearch.index.store.support.AbstractIndexStore; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.cache.query.IndicesQueryCache; 
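// The testDelayedMappingPropagationOnReplica change above leans on two test-framework idioms:
// capturing async responses in an AtomicReference through an ActionListener, and polling with
// assertBusy until the cluster catches up. A minimal sketch of the assertBusy retry idiom,
// assuming a 10-second timeout and a fixed 50ms backoff (the real helper lives in the test
// framework and its timings may differ):
static void assertBusy(Runnable assertion) throws InterruptedException {
    long deadline = System.currentTimeMillis() + 10000L; // assumed timeout
    while (true) {
        try {
            assertion.run(); // throws AssertionError while the condition does not hold yet
            return;
        } catch (AssertionError e) {
            if (System.currentTimeMillis() >= deadline) {
                throw e; // deadline reached: surface the last failure
            }
        }
        Thread.sleep(50); // assumed backoff between retries
    }
}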
import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ElasticsearchIntegrationTest; @@ -59,9 +62,7 @@ import java.util.Random; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.index.query.QueryBuilders.filteredQuery; -import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.is; @@ -77,39 +78,12 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { protected Settings nodeSettings(int nodeOrdinal) { //Filter/Query cache is cleaned periodically, default is 60s, so make sure it runs often. Thread.sleep for 60s is bad return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) - .put("indices.cache.filter.clean_interval", "1ms") .put(IndicesQueryCache.INDICES_CACHE_QUERY_CLEAN_INTERVAL, "1ms") .put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true) - .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, WeightedFilterCache.class) + .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class) .build(); } - @Test - public void testClearCacheFilterKeys() { - client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2)).execute().actionGet(); - ensureGreen(); - client().prepareIndex("test", "type", "1").setSource("field", "value").execute().actionGet(); - client().admin().indices().prepareRefresh().execute().actionGet(); - - NodesStatsResponse nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet(); - assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l)); - IndicesStatsResponse indicesStats = client().admin().indices().prepareStats("test").clear().setFilterCache(true).execute().actionGet(); - assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l)); - - SearchResponse searchResponse = client().prepareSearch().setQuery(filteredQuery(matchAllQuery(), FilterBuilders.termFilter("field", "value").cacheKey("test_key"))).execute().actionGet(); - assertThat(searchResponse.getHits().getHits().length, equalTo(1)); - nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet(); - assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), greaterThan(0l)); - indicesStats = client().admin().indices().prepareStats("test").clear().setFilterCache(true).execute().actionGet(); - assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(0l)); - - client().admin().indices().prepareClearCache().setFilterKeys("test_key").execute().actionGet(); - nodesStats = client().admin().cluster().prepareNodesStats().setIndices(true).execute().actionGet(); - assertThat(nodesStats.getNodes()[0].getIndices().getFilterCache().getMemorySizeInBytes() + 
nodesStats.getNodes()[1].getIndices().getFilterCache().getMemorySizeInBytes(), equalTo(0l)); - indicesStats = client().admin().indices().prepareStats("test").clear().setFilterCache(true).execute().actionGet(); - assertThat(indicesStats.getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(0l)); - } - @Test public void testFieldDataStats() { client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.number_of_shards", 2)).execute().actionGet(); @@ -308,7 +282,7 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { public void nonThrottleStats() throws Exception { assertAcked(prepareCreate("test") .setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "merge") + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "merge") .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, "2") @@ -341,7 +315,7 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { public void throttleStats() throws Exception { assertAcked(prepareCreate("test") .setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "merge") + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "merge") .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, "1") .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0") .put(TieredMergePolicyProvider.INDEX_MERGE_POLICY_MAX_MERGE_AT_ONCE, "2") @@ -991,4 +965,90 @@ public class IndexStatsTests extends ElasticsearchIntegrationTest { } } + private void assertEquals(FilterCacheStats stats1, FilterCacheStats stats2) { + assertEquals(stats1.getCacheCount(), stats2.getCacheCount()); + assertEquals(stats1.getCacheSize(), stats2.getCacheSize()); + assertEquals(stats1.getEvictions(), stats2.getEvictions()); + assertEquals(stats1.getHitCount(), stats2.getHitCount()); + assertEquals(stats1.getMemorySizeInBytes(), stats2.getMemorySizeInBytes()); + assertEquals(stats1.getMissCount(), stats2.getMissCount()); + assertEquals(stats1.getTotalCount(), stats2.getTotalCount()); + } + + private void assertCumulativeFilterCacheStats(IndicesStatsResponse response) { + assertAllSuccessful(response); + FilterCacheStats total = response.getTotal().filterCache; + FilterCacheStats indexTotal = new FilterCacheStats(); + FilterCacheStats shardTotal = new FilterCacheStats(); + for (IndexStats indexStats : response.getIndices().values()) { + indexTotal.add(indexStats.getTotal().filterCache); + } + for (ShardStats shardStats : response.getShards()) { + shardTotal.add(shardStats.getStats().filterCache); + } + assertEquals(total, indexTotal); + assertEquals(total, shardTotal); + } + + public void testFilterCacheStats() throws Exception { + assertAcked(prepareCreate("index").setSettings("number_of_replicas", 0).get()); + indexRandom(true, + client().prepareIndex("index", "type", "1").setSource("foo", "bar"), + client().prepareIndex("index", "type", "2").setSource("foo", "baz")); + ensureGreen(); + + IndicesStatsResponse response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertCumulativeFilterCacheStats(response); + assertEquals(0, response.getTotal().filterCache.getCacheSize()); + + SearchResponse r; + assertSearchResponse(r = client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get()); + response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); +
assertCumulativeFilterCacheStats(response); + assertThat(response.getTotal().filterCache.getHitCount(), equalTo(0L)); + assertThat(response.getTotal().filterCache.getEvictions(), equalTo(0L)); + assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L)); + + assertSearchResponse(client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get()); + response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertCumulativeFilterCacheStats(response); + assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getEvictions(), equalTo(0L)); + assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L)); + + assertTrue(client().prepareDelete("index", "type", "1").get().isFound()); + assertTrue(client().prepareDelete("index", "type", "2").get().isFound()); + refresh(); + response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertCumulativeFilterCacheStats(response); + assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getCacheSize(), equalTo(0L)); + assertThat(response.getTotal().filterCache.getCacheCount(), greaterThan(0L)); + + indexRandom(true, + client().prepareIndex("index", "type", "1").setSource("foo", "bar"), + client().prepareIndex("index", "type", "2").setSource("foo", "baz")); + assertSearchResponse(client().prepareSearch("index").setQuery(QueryBuilders.constantScoreQuery(QueryBuilders.matchQuery("foo", "baz"))).get()); + + response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertCumulativeFilterCacheStats(response); + assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getCacheSize(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getMemorySizeInBytes(), greaterThan(0L)); + + assertAllSuccessful(client().admin().indices().prepareClearCache("index").setFilterCache(true).get()); + response = client().admin().indices().prepareStats("index").setFilterCache(true).get(); + assertCumulativeFilterCacheStats(response); + assertThat(response.getTotal().filterCache.getHitCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getEvictions(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getMissCount(), greaterThan(0L)); + assertThat(response.getTotal().filterCache.getCacheSize(), equalTo(0L)); + assertThat(response.getTotal().filterCache.getMemorySizeInBytes(), equalTo(0L)); + } + } diff --git a/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java b/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java index e1efe59776d..9e05d915803 100644 --- a/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java +++ b/src/test/java/org/elasticsearch/indices/store/IndicesStoreIntegrationTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationComman import 
org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.discovery.DiscoveryService; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; @@ -41,10 +42,12 @@ import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.test.disruption.SlowClusterStateProcessing; import org.junit.Test; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -58,7 +61,18 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest { @Override protected Settings nodeSettings(int nodeOrdinal) { // simplify this and only use a single data path - return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put("path.data", "").build(); + return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put("path.data", "") + // by default this value is 1 sec in tests (30 sec in practice), but the disruption added below + // lasts between 1 and 2 sec and can therefore cause each of the shard deletion requests to time out. + // to prevent this we set the timeout to something high, i.e. the default used in practice + .put(IndicesStore.INDICES_STORE_DELETE_SHARD_TIMEOUT, new TimeValue(30, TimeUnit.SECONDS)) + .build(); + } + + @Override + protected void ensureClusterStateConsistency() throws IOException { + // testShardActiveElseWhere might change the state of a non-master node + // so we cannot check state consistency of this cluster } @Test @@ -97,9 +111,8 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest { assertThat(Files.exists(indexDirectory(node_3, "test")), equalTo(false)); logger.info("--> move shard from node_1 to node_3, and wait for relocation to finish"); - SlowClusterStateProcessing disruption = null; - if (randomBoolean()) { - disruption = new SlowClusterStateProcessing(node_3, getRandom(), 0, 0, 1000, 2000); + if (randomBoolean()) { // sometimes add cluster-state delay to trigger observers in IndicesStore.ShardActiveRequestHandler + final SlowClusterStateProcessing disruption = new SlowClusterStateProcessing(node_3, getRandom(), 0, 0, 1000, 2000); internalCluster().setDisruptionScheme(disruption); disruption.startDisrupting(); } @@ -116,6 +129,7 @@ public class IndicesStoreIntegrationTests extends ElasticsearchIntegrationTest { assertThat(Files.exists(indexDirectory(node_2, "test")), equalTo(true)); assertThat(Files.exists(shardDirectory(node_3, "test", 0)), equalTo(true)); assertThat(Files.exists(indexDirectory(node_3, "test")), equalTo(true)); + } @Test diff --git a/src/test/java/org/elasticsearch/indices/template/IndexTemplateBlocksTests.java b/src/test/java/org/elasticsearch/indices/template/IndexTemplateBlocksTests.java new file mode 100644 index 00000000000..e8fddf2c1aa --- /dev/null +++ b/src/test/java/org/elasticsearch/indices/template/IndexTemplateBlocksTests.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership.
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices.template; + +import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import java.io.IOException; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.hamcrest.Matchers.hasSize; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class IndexTemplateBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testIndexTemplatesWithBlocks() throws IOException { + // creates a simple index template + client().admin().indices().preparePutTemplate("template_blocks") + .setTemplate("te*") + .setOrder(0) + .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") + .startObject("field1").field("type", "string").field("store", "yes").endObject() + .startObject("field2").field("type", "string").field("store", "yes").field("index", "not_analyzed").endObject() + .endObject().endObject().endObject()) + .execute().actionGet(); + + try { + setClusterReadOnly(true); + + GetIndexTemplatesResponse response = client().admin().indices().prepareGetTemplates("template_blocks").execute().actionGet(); + assertThat(response.getIndexTemplates(), hasSize(1)); + + assertBlocked(client().admin().indices().preparePutTemplate("template_blocks_2") + .setTemplate("block*") + .setOrder(0) + .addAlias(new Alias("alias_1"))); + + assertBlocked(client().admin().indices().prepareDeleteTemplate("template_blocks")); + + } finally { + setClusterReadOnly(false); + } + } +} diff --git a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java index 39a54516e9b..f2487ec9e4f 100644 --- a/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java +++ b/src/test/java/org/elasticsearch/indices/template/SimpleIndexTemplateTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.indices.template; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionRequestValidationException; @@ -29,9 +28,10 @@ import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.template.get.GetIndexTemplatesResponse; import org.elasticsearch.action.admin.indices.template.put.PutIndexTemplateRequestBuilder; 
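// The block-related tests added in this change (IndexTemplateBlocksTests above, plus the
// warmer, open/close and update-settings block tests elsewhere in this diff) all lean on
// enableIndexBlock/disableIndexBlock helpers. A minimal sketch of what such a helper
// presumably does -- flipping an "index.blocks.*" setting through the update-settings API;
// the actual framework implementation may differ:
static void setIndexBlock(Client client, String index, String blockSetting, boolean enabled) {
    client.admin().indices().prepareUpdateSettings(index)
            .setSettings(ImmutableSettings.builder().put(blockSetting, enabled).build())
            .get();
}
// Usage: setIndexBlock(client(), "test-blocks", SETTING_BLOCKS_READ, true) installs the
// read block that assertBlocked(..., INDEX_READ_BLOCK) then expects to trip.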
+import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.cluster.metadata.AliasMetaData; -import org.elasticsearch.common.Priority; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.FilterBuilders; @@ -68,6 +68,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { client().admin().indices().preparePutTemplate("template_1") .setTemplate("te*") + .setSettings(indexSettings()) .setOrder(0) .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("field1").field("type", "string").field("store", "yes").endObject() @@ -77,6 +78,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { client().admin().indices().preparePutTemplate("template_2") .setTemplate("test*") + .setSettings(indexSettings()) .setOrder(1) .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("field2").field("type", "string").field("store", "no").endObject() @@ -86,6 +88,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { // test create param assertThrows(client().admin().indices().preparePutTemplate("template_2") .setTemplate("test*") + .setSettings(indexSettings()) .setCreate(true) .setOrder(1) .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") @@ -101,8 +104,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { // index something into test_index, will match on both templates client().prepareIndex("test_index", "type1", "1").setSource("field1", "value1", "field2", "value 2").setRefresh(true).execute().actionGet(); - client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); - + ensureGreen(); SearchResponse searchResponse = client().prepareSearch("test_index") .setQuery(termQuery("field1", "value1")) .addField("field1").addField("field2") @@ -114,8 +116,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { client().prepareIndex("text_index", "type1", "1").setSource("field1", "value1", "field2", "value 2").setRefresh(true).execute().actionGet(); - client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet(); - + ensureGreen(); // now only match on one template (template_1) searchResponse = client().prepareSearch("text_index") .setQuery(termQuery("field1", "value1")) @@ -283,6 +284,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { } @Test + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/8802") public void testBrokenMapping() throws Exception { // clean all templates setup by the framework. 
client().admin().indices().prepareDeleteTemplate("*").get(); @@ -344,7 +346,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { .addAlias(new Alias("templated_alias-{index}")) .addAlias(new Alias("filtered_alias").filter("{\"type\":{\"value\":\"type2\"}}")) .addAlias(new Alias("complex_filtered_alias") - .filter(FilterBuilders.termsFilter("_type", "typeX", "typeY", "typeZ").execution("bool").cache(true))) + .filter(FilterBuilders.termsFilter("_type", "typeX", "typeY", "typeZ").execution("bool"))) .get(); assertAcked(prepareCreate("test_index").addMapping("type1").addMapping("type2").addMapping("typeX").addMapping("typeY").addMapping("typeZ")); @@ -492,10 +494,10 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { try { createIndex("test"); fail("index creation should have failed due to invalid alias filter in matching index template"); - } catch(ElasticsearchIllegalArgumentException e) { + } catch(IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("failed to parse filter for alias [invalid_alias]")); assertThat(e.getCause(), instanceOf(QueryParsingException.class)); - assertThat(e.getCause().getMessage(), equalTo("[test] No filter registered for [invalid]")); + assertThat(e.getCause().getMessage(), equalTo("No filter registered for [invalid]")); } } @@ -509,7 +511,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { try { putIndexTemplateRequestBuilder.get(); - } catch(ElasticsearchIllegalArgumentException e) { + } catch(IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("failed to parse filter for alias [invalid_alias]")); } @@ -530,7 +532,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { createIndex("test"); fail("index creation should have failed due to alias with existing index name in matching index template"); } catch(InvalidAliasNameException e) { - assertThat(e.getMessage(), equalTo("[test] Invalid alias name [index], an index exists with the same name as the alias")); + assertThat(e.getMessage(), equalTo("Invalid alias name [index], an index exists with the same name as the alias")); } } @@ -543,7 +545,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { try { putIndexTemplateRequestBuilder.get(); fail("put template should have failed due to alias with empty name"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("alias name is required")); } } @@ -557,7 +559,7 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { try { putIndexTemplateRequestBuilder.get(); fail("put template should have failed due to alias with multiple index routings"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), equalTo("alias [alias] has several index routing values associated with it")); } } @@ -604,38 +606,66 @@ public class SimpleIndexTemplateTests extends ElasticsearchIntegrationTest { public void testStrictAliasParsingInIndicesCreatedViaTemplates() throws Exception { // Indexing into a should succeed, because the field mapping for field 'field' is defined in the test mapping (how multiple matching templates combine is sketched just below).
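// A rough sketch of the template-matching rule these assertions depend on, assuming
// templates are collected by pattern match and applied in ascending order so that
// higher-order templates override lower-order ones. The helper name and loop are
// illustrative assumptions, not the actual MetaDataCreateIndexService code
// (types from org.elasticsearch.cluster.metadata and java.util):
List<IndexTemplateMetaData> matching = findTemplatesMatching("test_index"); // hypothetical helper
Collections.sort(matching, new Comparator<IndexTemplateMetaData>() {
    @Override
    public int compare(IndexTemplateMetaData a, IndexTemplateMetaData b) {
        return Integer.compare(a.order(), b.order());
    }
});
ImmutableSettings.Builder merged = ImmutableSettings.builder();
for (IndexTemplateMetaData template : matching) {
    merged.put(template.settings()); // template_2 (order 1) overrides template_1 (order 0)
}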
client().admin().indices().preparePutTemplate("template1") - .setTemplate("a") + .setTemplate("a*") .setOrder(0) .addMapping("test", "field", "type=string") .addAlias(new Alias("alias1").filter(termFilter("field", "value"))).get(); // Indexing into b should succeed, because the field mapping for field 'field' is defined in the _default_ mapping and the test type exists. client().admin().indices().preparePutTemplate("template2") - .setTemplate("b") + .setTemplate("b*") .setOrder(0) .addMapping("_default_", "field", "type=string") .addMapping("test") .addAlias(new Alias("alias2").filter(termFilter("field", "value"))).get(); // Indexing into c should succeed, because the field mapping for field 'field' is defined in the _default_ mapping. client().admin().indices().preparePutTemplate("template3") - .setTemplate("c") + .setTemplate("c*") .setOrder(0) .addMapping("_default_", "field", "type=string") .addAlias(new Alias("alias3").filter(termFilter("field", "value"))).get(); // Indexing into d index should fail, since there is field with name 'field' in the mapping client().admin().indices().preparePutTemplate("template4") - .setTemplate("d") + .setTemplate("d*") .setOrder(0) .addAlias(new Alias("alias4").filter(termFilter("field", "value"))).get(); - client().prepareIndex("a", "test", "test").setSource("{}").get(); - client().prepareIndex("b", "test", "test").setSource("{}").get(); - client().prepareIndex("c", "test", "test").setSource("{}").get(); + client().prepareIndex("a1", "test", "test").setSource("{}").get(); + BulkResponse response = client().prepareBulk().add(new IndexRequest("a2", "test", "test").source("{}")).get(); + assertThat(response.hasFailures(), is(false)); + assertThat(response.getItems()[0].isFailed(), equalTo(false)); + assertThat(response.getItems()[0].getIndex(), equalTo("a2")); + assertThat(response.getItems()[0].getType(), equalTo("test")); + assertThat(response.getItems()[0].getId(), equalTo("test")); + assertThat(response.getItems()[0].getVersion(), equalTo(1l)); + + client().prepareIndex("b1", "test", "test").setSource("{}").get(); + response = client().prepareBulk().add(new IndexRequest("b2", "test", "test").source("{}")).get(); + assertThat(response.hasFailures(), is(false)); + assertThat(response.getItems()[0].isFailed(), equalTo(false)); + assertThat(response.getItems()[0].getIndex(), equalTo("b2")); + assertThat(response.getItems()[0].getType(), equalTo("test")); + assertThat(response.getItems()[0].getId(), equalTo("test")); + assertThat(response.getItems()[0].getVersion(), equalTo(1l)); + + client().prepareIndex("c1", "test", "test").setSource("{}").get(); + response = client().prepareBulk().add(new IndexRequest("c2", "test", "test").source("{}")).get(); + assertThat(response.hasFailures(), is(false)); + assertThat(response.getItems()[0].isFailed(), equalTo(false)); + assertThat(response.getItems()[0].getIndex(), equalTo("c2")); + assertThat(response.getItems()[0].getType(), equalTo("test")); + assertThat(response.getItems()[0].getId(), equalTo("test")); + assertThat(response.getItems()[0].getVersion(), equalTo(1l)); + try { - client().prepareIndex("d", "test", "test").setSource("{}").get(); + client().prepareIndex("d1", "test", "test").setSource("{}").get(); fail(); } catch (Exception e) { - assertThat(ExceptionsHelper.unwrapCause(e), instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(ExceptionsHelper.unwrapCause(e), instanceOf(IllegalArgumentException.class)); assertThat(e.getMessage(), containsString("failed to parse filter for alias 
[alias4]")); } + response = client().prepareBulk().add(new IndexRequest("d2", "test", "test").source("{}")).get(); + assertThat(response.hasFailures(), is(true)); + assertThat(response.getItems()[0].isFailed(), equalTo(true)); + assertThat(response.getItems()[0].getFailureMessage(), containsString("failed to parse filter for alias [alias4]")); } } diff --git a/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksTests.java b/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksTests.java new file mode 100644 index 00000000000..a2735d6134e --- /dev/null +++ b/src/test/java/org/elasticsearch/indices/warmer/IndicesWarmerBlocksTests.java @@ -0,0 +1,158 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.indices.warmer; + + +import com.carrotsearch.hppc.cursors.ObjectObjectCursor; +import com.google.common.collect.ImmutableList; +import org.elasticsearch.action.admin.indices.warmer.get.GetWarmersResponse; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.search.warmer.IndexWarmersMetaData; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope; +import org.junit.Test; + +import java.util.Arrays; + +import static org.elasticsearch.cluster.metadata.IndexMetaData.*; +import static org.elasticsearch.cluster.metadata.MetaData.CLUSTER_READ_ONLY_BLOCK; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertBlocked; +import static org.hamcrest.Matchers.equalTo; + +@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST) +public class IndicesWarmerBlocksTests extends ElasticsearchIntegrationTest { + + @Test + public void testPutWarmerWithBlocks() { + createIndex("test-blocks"); + ensureGreen("test-blocks"); + + // Index reads are blocked, the warmer can't be registered + try { + enableIndexBlock("test-blocks", SETTING_BLOCKS_READ); + assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked") + .setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_READ_BLOCK); + } finally { + disableIndexBlock("test-blocks", SETTING_BLOCKS_READ); + } + + // Index writes are blocked, the warmer can be registered + try { + enableIndexBlock("test-blocks", SETTING_BLOCKS_WRITE); + assertAcked(client().admin().indices().preparePutWarmer("warmer_acked") + .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))); + } finally { + disableIndexBlock("test-blocks", SETTING_BLOCKS_WRITE); + } + + // Index metadata changes are blocked, the warmer can't be registered + try { + 
enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked") + .setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_METADATA_BLOCK); + } finally { + disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); + } + + // The index is made read-only, the warmer can't be registered + try { + enableIndexBlock("test-blocks", SETTING_READ_ONLY); + assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked") + .setSearchRequest(client().prepareSearch("test-*").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), INDEX_READ_ONLY_BLOCK); + } finally { + disableIndexBlock("test-blocks", SETTING_READ_ONLY); + } + + // Adding a new warmer is not possible when the cluster is read-only + try { + setClusterReadOnly(true); + assertBlocked(client().admin().indices().preparePutWarmer("warmer_blocked") + .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery())), CLUSTER_READ_ONLY_BLOCK); + } finally { + setClusterReadOnly(false); + } + } + + @Test + public void testGetWarmerWithBlocks() { + createIndex("test-blocks"); + ensureGreen("test-blocks"); + + assertAcked(client().admin().indices().preparePutWarmer("warmer_block") + .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))); + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) { + try { + enableIndexBlock("test-blocks", blockSetting); + GetWarmersResponse response = client().admin().indices().prepareGetWarmers("test-blocks").get(); + assertThat(response.warmers().size(), equalTo(1)); + + ObjectObjectCursor<String, ImmutableList<IndexWarmersMetaData.Entry>> entry = response.warmers().iterator().next(); + assertThat(entry.key, equalTo("test-blocks")); + assertThat(entry.value.size(), equalTo(1)); + assertThat(entry.value.iterator().next().name(), equalTo("warmer_block")); + } finally { + disableIndexBlock("test-blocks", blockSetting); + } + } + + // Request is blocked + try { + enableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); + assertBlocked(client().admin().indices().prepareGetWarmers("test-blocks"), INDEX_METADATA_BLOCK); + } finally { + disableIndexBlock("test-blocks", SETTING_BLOCKS_METADATA); + } + } + + @Test + public void testDeleteWarmerWithBlocks() { + createIndex("test-blocks"); + ensureGreen("test-blocks"); + + // Request is not blocked + for (String blockSetting : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) { + try { + assertAcked(client().admin().indices().preparePutWarmer("warmer_block") + .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))); + + enableIndexBlock("test-blocks", blockSetting); + assertAcked(client().admin().indices().prepareDeleteWarmer().setIndices("test-blocks").setNames("warmer_block")); + } finally { + disableIndexBlock("test-blocks", blockSetting); + } + } + + // Request is blocked + for (String blockSetting : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) { + try { + assertAcked(client().admin().indices().preparePutWarmer("warmer_block") + .setSearchRequest(client().prepareSearch("test-blocks").setTypes("a1").setQuery(QueryBuilders.matchAllQuery()))); + + enableIndexBlock("test-blocks", blockSetting); + assertBlocked(client().admin().indices().prepareDeleteWarmer().setIndices("test-blocks").setNames("warmer_block")); + }
finally { + disableIndexBlock("test-blocks", blockSetting); + } + } + } +} diff --git a/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java b/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java index deb955ed51c..5802d20f1dd 100644 --- a/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java +++ b/src/test/java/org/elasticsearch/indices/warmer/SimpleIndicesWarmerTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.indices.warmer; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import com.google.common.collect.ImmutableList; + import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.admin.indices.segments.IndexSegments; import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; diff --git a/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java b/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java index c7456fa3a3a..a788964bbfb 100644 --- a/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java +++ b/src/test/java/org/elasticsearch/nested/SimpleNestedTests.java @@ -256,113 +256,6 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest { } } - @Test - public void simpleNestedDeletedByQuery1() throws Exception { - simpleNestedDeleteByQuery(3, 0); - } - - @Test - public void simpleNestedDeletedByQuery2() throws Exception { - simpleNestedDeleteByQuery(3, 1); - } - - @Test - public void simpleNestedDeletedByQuery3() throws Exception { - simpleNestedDeleteByQuery(3, 2); - } - - private void simpleNestedDeleteByQuery(int total, int docToDelete) throws Exception { - - assertAcked(prepareCreate("test") - .setSettings(settingsBuilder().put(indexSettings()).put("index.referesh_interval", -1).build()) - .addMapping("type1", jsonBuilder().startObject().startObject("type1").startObject("properties") - .startObject("nested1") - .field("type", "nested") - .endObject() - .endObject().endObject().endObject())); - - ensureGreen(); - - for (int i = 0; i < total; i++) { - client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject() - .field("field1", "value1") - .startArray("nested1") - .startObject() - .field("n_field1", "n_value1_1") - .field("n_field2", "n_value2_1") - .endObject() - .startObject() - .field("n_field1", "n_value1_2") - .field("n_field2", "n_value2_2") - .endObject() - .endArray() - .endObject()).execute().actionGet(); - } - - - flush(); - assertDocumentCount("test", total * 3); - - client().prepareDeleteByQuery("test").setQuery(QueryBuilders.idsQuery("type1").ids(Integer.toString(docToDelete))).execute().actionGet(); - flush(); - refresh(); - assertDocumentCount("test", (total * 3l) - 3); - - for (int i = 0; i < total; i++) { - assertThat(client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet().isExists(), equalTo(i != docToDelete)); - } - } - - @Test - public void noChildrenNestedDeletedByQuery1() throws Exception { - noChildrenNestedDeleteByQuery(3, 0); - } - - @Test - public void noChildrenNestedDeletedByQuery2() throws Exception { - noChildrenNestedDeleteByQuery(3, 1); - } - - @Test - public void noChildrenNestedDeletedByQuery3() throws Exception { - noChildrenNestedDeleteByQuery(3, 2); - } - - private void noChildrenNestedDeleteByQuery(long total, int docToDelete) throws Exception { - - assertAcked(prepareCreate("test") - .setSettings(settingsBuilder().put(indexSettings()).put("index.referesh_interval", -1).build()) - .addMapping("type1", 
jsonBuilder().startObject().startObject("type1").startObject("properties") - .startObject("nested1") - .field("type", "nested") - .endObject() - .endObject().endObject().endObject())); - - ensureGreen(); - - - for (int i = 0; i < total; i++) { - client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject() - .field("field1", "value1") - .endObject()).execute().actionGet(); - } - - - flush(); - refresh(); - - assertDocumentCount("test", total); - - client().prepareDeleteByQuery("test").setQuery(QueryBuilders.idsQuery("type1").ids(Integer.toString(docToDelete))).execute().actionGet(); - flush(); - refresh(); - assertDocumentCount("test", total - 1); - - for (int i = 0; i < total; i++) { - assertThat(client().prepareGet("test", "type1", Integer.toString(i)).execute().actionGet().isExists(), equalTo(i != docToDelete)); - } - } - @Test public void multiNested() throws Exception { assertAcked(prepareCreate("test") @@ -487,15 +380,6 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest { flush(); refresh(); assertDocumentCount("test", 6); - - client().prepareDeleteByQuery("alias1").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(); - flush(); - refresh(); - - // This must be 3, otherwise child docs aren't deleted. - // If this is 5 then only the parent has been removed - assertDocumentCount("test", 3); - assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false)); } @Test @@ -532,7 +416,7 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest { assertThat(searchResponse.getHits().totalHits(), equalTo(1l)); Explanation explanation = searchResponse.getHits().hits()[0].explanation(); assertThat(explanation.getValue(), equalTo(2f)); - assertThat(explanation.toString(), equalTo("2.0 = (MATCH) sum of:\n 2.0 = (MATCH) Score based on child doc range from 0 to 1\n 0.0 = match on required clause, product of:\n 0.0 = # clause\n 0.0 = (MATCH) Match on id 2\n")); + assertThat(explanation.toString(), startsWith("2.0 = sum of:\n 2.0 = Score based on child doc range from 0 to 1\n")); // TODO: Enable when changes from BlockJoinQuery#explain are added to Lucene (Most likely version 4.2) // assertThat(explanation.getDetails().length, equalTo(2)); // assertThat(explanation.getDetails()[0].getValue(), equalTo(1f)); @@ -707,7 +591,7 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest { .execute().actionGet(); Assert.fail("SearchPhaseExecutionException should have been thrown"); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("type [string] doesn't support mode [SUM]")); + assertThat(e.toString(), containsString("type [string] doesn't support mode [SUM]")); } } @@ -1371,4 +1255,4 @@ public class SimpleNestedTests extends ElasticsearchIntegrationTest { } -} \ No newline at end of file +} diff --git a/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java b/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java index 9815887f1d1..69dac24aa69 100644 --- a/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java +++ b/src/test/java/org/elasticsearch/operateAllIndices/DestructiveOperationsIntegrationTests.java @@ -19,11 +19,9 @@ package org.elasticsearch.operateAllIndices; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.support.DestructiveOperations; import 
org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -54,12 +52,14 @@ public class DestructiveOperationsIntegrationTests extends ElasticsearchIntegrat // should fail since index1 is the only index. client().admin().indices().prepareDelete("i*").get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) {} + } catch (IllegalArgumentException e) { + } try { client().admin().indices().prepareDelete("_all").get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) {} + } catch (IllegalArgumentException e) { + } settings = ImmutableSettings.builder() .put(DestructiveOperations.REQUIRES_NAME, false) @@ -85,20 +85,22 @@ public class DestructiveOperationsIntegrationTests extends ElasticsearchIntegrat try { client().admin().indices().prepareClose("_all").get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) {} + } catch (IllegalArgumentException e) { + } try { assertAcked(client().admin().indices().prepareOpen("_all").get()); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } try { client().admin().indices().prepareClose("*").get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) {} + } catch (IllegalArgumentException e) { + } try { assertAcked(client().admin().indices().prepareOpen("*").get()); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } settings = ImmutableSettings.builder() @@ -110,40 +112,5 @@ public class DestructiveOperationsIntegrationTests extends ElasticsearchIntegrat // end close index: client().admin().indices().prepareDelete("_all").get(); - // delete_by_query: - settings = ImmutableSettings.builder() - .put(DestructiveOperations.REQUIRES_NAME, true) - .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); - - assertAcked(client().admin().indices().prepareCreate("index1").get()); - assertAcked(client().admin().indices().prepareCreate("1index").get()); - - // Should succeed, since no wildcards - client().prepareDeleteByQuery("1index").setQuery(QueryBuilders.matchAllQuery()).get(); - - try { - client().prepareDeleteByQuery("_all").setQuery(QueryBuilders.matchAllQuery()).get(); - fail(); - } catch (ElasticsearchIllegalArgumentException e) {} - - try { - client().prepareDeleteByQuery().setQuery(QueryBuilders.matchAllQuery()).get(); - fail(); - } catch (ElasticsearchIllegalArgumentException e) {} - - settings = ImmutableSettings.builder() - .put(DestructiveOperations.REQUIRES_NAME, false) - .build(); - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); - - client().prepareDeleteByQuery().setQuery(QueryBuilders.matchAllQuery()).get(); - client().prepareDeleteByQuery("_all").setQuery(QueryBuilders.matchAllQuery()).get(); - - assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings)); - client().prepareDeleteByQuery().setQuery(QueryBuilders.matchAllQuery()).get(); - // end delete_by_query: - client().admin().indices().prepareDelete("_all").get(); } - } diff --git a/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java b/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java index 263af854883..4540cc75a06 100644 --- 
a/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java +++ b/src/test/java/org/elasticsearch/percolator/PercolatorFacetsAndAggregationsTests.java @@ -20,11 +20,16 @@ package org.elasticsearch.percolator; import org.elasticsearch.action.percolate.PercolateRequestBuilder; import org.elasticsearch.action.percolate.PercolateResponse; +import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.AggregationBuilders; +import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.Aggregator.SubAggCollectionMode; import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; +import org.elasticsearch.search.aggregations.reducers.ReducerBuilders; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -40,6 +45,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertMatc import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; /** * @@ -66,20 +72,18 @@ public class PercolatorFacetsAndAggregationsTests extends ElasticsearchIntegrati expectedCount[i % numUniqueQueries]++; QueryBuilder queryBuilder = matchQuery("field1", value); client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i)) - .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()) - .execute().actionGet(); + .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()).execute() + .actionGet(); } client().admin().indices().prepareRefresh("test").execute().actionGet(); for (int i = 0; i < numQueries; i++) { String value = values[i % numUniqueQueries]; - PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate() - .setIndices("test").setDocumentType("type") + PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate().setIndices("test").setDocumentType("type") .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject())); SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values()); - percolateRequestBuilder.addAggregation(AggregationBuilders.terms("a").field("field2") - .collectMode(aggCollectionMode )); + percolateRequestBuilder.addAggregation(AggregationBuilders.terms("a").field("field2").collectMode(aggCollectionMode)); if (randomBoolean()) { percolateRequestBuilder.setPercolateQuery(matchAllQuery()); @@ -111,16 +115,153 @@ public class PercolatorFacetsAndAggregationsTests extends ElasticsearchIntegrati } } + @Test + // Just test the integration with facets and aggregations, not the facet and aggregation functionality! 
+    public void testAggregationsAndReducers() throws Exception {
+        assertAcked(prepareCreate("test").addMapping("type", "field1", "type=string", "field2", "type=string"));
+        ensureGreen();
+
+        int numQueries = scaledRandomIntBetween(250, 500);
+        int numUniqueQueries = between(1, numQueries / 2);
+        String[] values = new String[numUniqueQueries];
+        for (int i = 0; i < values.length; i++) {
+            values[i] = "value" + i;
+        }
+        int[] expectedCount = new int[numUniqueQueries];
+
+        logger.info("--> registering {} queries", numQueries);
+        for (int i = 0; i < numQueries; i++) {
+            String value = values[i % numUniqueQueries];
+            expectedCount[i % numUniqueQueries]++;
+            QueryBuilder queryBuilder = matchQuery("field1", value);
+            client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+                    .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", "b").endObject()).execute()
+                    .actionGet();
+        }
+        client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+        for (int i = 0; i < numQueries; i++) {
+            String value = values[i % numUniqueQueries];
+            PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate().setIndices("test").setDocumentType("type")
+                    .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject()));
+
+            SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values());
+            percolateRequestBuilder.addAggregation(AggregationBuilders.terms("a").field("field2").collectMode(aggCollectionMode));
+
+            if (randomBoolean()) {
+                percolateRequestBuilder.setPercolateQuery(matchAllQuery());
+            }
+            if (randomBoolean()) {
+                percolateRequestBuilder.setScore(true);
+            } else {
+                percolateRequestBuilder.setSortByScore(true).setSize(numQueries);
+            }
+
+            boolean countOnly = randomBoolean();
+            if (countOnly) {
+                percolateRequestBuilder.setOnlyCount(countOnly);
+            }
+
+            percolateRequestBuilder.addAggregation(ReducerBuilders.maxBucket("max_a").setBucketsPaths("a>_count"));
+
+            PercolateResponse response = percolateRequestBuilder.execute().actionGet();
+            assertMatchCount(response, expectedCount[i % numUniqueQueries]);
+            if (!countOnly) {
+                assertThat(response.getMatches(), arrayWithSize(expectedCount[i % numUniqueQueries]));
+            }
+
+            Aggregations aggregations = response.getAggregations();
+            assertThat(aggregations.asList().size(), equalTo(2));
+            Terms terms = aggregations.get("a");
+            assertThat(terms, notNullValue());
+            assertThat(terms.getName(), equalTo("a"));
+            List buckets = new ArrayList<>(terms.getBuckets());
+            assertThat(buckets.size(), equalTo(1));
+            assertThat(buckets.get(0).getKeyAsString(), equalTo("b"));
+            assertThat(buckets.get(0).getDocCount(), equalTo((long) expectedCount[i % values.length]));
+
+            InternalBucketMetricValue maxA = aggregations.get("max_a");
+            assertThat(maxA, notNullValue());
+            assertThat(maxA.getName(), equalTo("max_a"));
+            assertThat(maxA.value(), equalTo((double) expectedCount[i % values.length]));
+            assertThat(maxA.keys(), equalTo(new String[] { "b" }));
+        }
+    }
+
     @Test
     public void testSignificantAggs() throws Exception {
         client().admin().indices().prepareCreate("test").execute().actionGet();
         ensureGreen();
-        PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate()
-                .setIndices("test").setDocumentType("type")
+        PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate().setIndices("test").setDocumentType("type")
                 .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", "value").endObject()))
                 .addAggregation(AggregationBuilders.significantTerms("a").field("field2"));
         PercolateResponse response = percolateRequestBuilder.get();
         assertNoFailures(response);
     }
 
+    @Test
+    public void testSingleShardAggregations() throws Exception {
+        assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder().put(indexSettings()).put("SETTING_NUMBER_OF_SHARDS", 1))
+                .addMapping("type", "field1", "type=string", "field2", "type=string"));
+        ensureGreen();
+
+        int numQueries = scaledRandomIntBetween(250, 500);
+
+        logger.info("--> registering {} queries", numQueries);
+        for (int i = 0; i < numQueries; i++) {
+            String value = "value0";
+            QueryBuilder queryBuilder = matchQuery("field1", value);
+            client().prepareIndex("test", PercolatorService.TYPE_NAME, Integer.toString(i))
+                    .setSource(jsonBuilder().startObject().field("query", queryBuilder).field("field2", i % 3 == 0 ? "b" : "a").endObject())
+                    .execute()
+                    .actionGet();
+        }
+        client().admin().indices().prepareRefresh("test").execute().actionGet();
+
+        for (int i = 0; i < numQueries; i++) {
+            String value = "value0";
+            PercolateRequestBuilder percolateRequestBuilder = client().preparePercolate().setIndices("test").setDocumentType("type")
+                    .setPercolateDoc(docBuilder().setDoc(jsonBuilder().startObject().field("field1", value).endObject()));
+
+            SubAggCollectionMode aggCollectionMode = randomFrom(SubAggCollectionMode.values());
+            percolateRequestBuilder.addAggregation(AggregationBuilders.terms("terms").field("field2").collectMode(aggCollectionMode)
+                    .order(Order.term(true)).shardSize(2).size(1));
+
+            if (randomBoolean()) {
+                percolateRequestBuilder.setPercolateQuery(matchAllQuery());
+            }
+            if (randomBoolean()) {
+                percolateRequestBuilder.setScore(true);
+            } else {
+                percolateRequestBuilder.setSortByScore(true).setSize(numQueries);
+            }
+
+            boolean countOnly = randomBoolean();
+            if (countOnly) {
+                percolateRequestBuilder.setOnlyCount(countOnly);
+            }
+
+            percolateRequestBuilder.addAggregation(ReducerBuilders.maxBucket("max_terms").setBucketsPaths("terms>_count"));
+
+            PercolateResponse response = percolateRequestBuilder.execute().actionGet();
+            assertMatchCount(response, numQueries);
+            if (!countOnly) {
+                assertThat(response.getMatches(), arrayWithSize(numQueries));
+            }
+
+            Aggregations aggregations = response.getAggregations();
+            assertThat(aggregations.asList().size(), equalTo(2));
+            Terms terms = aggregations.get("terms");
+            assertThat(terms, notNullValue());
+            assertThat(terms.getName(), equalTo("terms"));
+            List buckets = new ArrayList<>(terms.getBuckets());
+            assertThat(buckets.size(), equalTo(1));
+            assertThat(buckets.get(0).getKeyAsString(), equalTo("a"));
+
+            InternalBucketMetricValue maxA = aggregations.get("max_terms");
+            assertThat(maxA, notNullValue());
+            assertThat(maxA.getName(), equalTo("max_terms"));
+            assertThat(maxA.keys(), equalTo(new String[] { "a" }));
+        }
+    }
 }
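The two tests added above are the first integration points for the new reducers framework: a max_bucket reducer reads a sibling terms aggregation through the buckets path "a>_count" and reports the largest bucket's doc count plus the key(s) holding it. A rough standalone sketch, assuming a regular search request accepts reducer builders the same way the percolate builder does here:

    // Hypothetical usage outside the percolator; same builders as in the tests above.
    SearchResponse response = client().prepareSearch("test")
            .addAggregation(AggregationBuilders.terms("a").field("field2"))
            .addAggregation(ReducerBuilders.maxBucket("max_a").setBucketsPaths("a>_count"))
            .get();
    InternalBucketMetricValue maxA = response.getAggregations().get("max_a");
    double biggestBucket = maxA.value(); // doc_count of the largest "a" bucket
    String[] keys = maxA.keys();         // key(s) of the bucket(s) holding that maximum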
diff --git a/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java b/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java
index 2489fec6b3a..21fa628f2a8 100644
--- a/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java
+++ b/src/test/java/org/elasticsearch/plugins/PluginManagerTests.java
@@ -23,17 +23,14 @@ import org.apache.http.impl.client.HttpClients;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.ElasticsearchTimeoutException;
 import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
 import org.elasticsearch.action.admin.cluster.node.info.PluginInfo;
 import org.elasticsearch.common.collect.Tuple;
-import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.env.Environment;
-import org.elasticsearch.http.HttpServerTransport;
 import org.elasticsearch.node.internal.InternalSettingsPreparer;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.test.ElasticsearchIntegrationTest;
@@ -68,7 +65,7 @@ import static org.hamcrest.Matchers.notNullValue;
 // if its in your classpath, then do not use plugins!!!!!!
 public class PluginManagerTests extends ElasticsearchIntegrationTest {
 
-    @Test(expected = ElasticsearchIllegalArgumentException.class)
+    @Test(expected = IllegalArgumentException.class)
     public void testDownloadAndExtract_NullName_ThrowsException() throws IOException {
         pluginManager(getPluginUrlForResource("plugin_single_folder.zip")).downloadAndExtract(null);
     }
@@ -422,7 +419,7 @@ public class PluginManagerTests extends ElasticsearchIntegrationTest {
     @Test
     @Network
     public void testInstallPluginWithElasticsearchDownloadService() throws IOException {
-        assumeTrue("download.elasticsearch.org is accessible", isDownloadServiceWorking("download.elasticsearch.org", 80, "/elasticsearch/ci-test.txt"));
+        assumeTrue("download.elastic.co is accessible", isDownloadServiceWorking("download.elastic.co", 80, "/elasticsearch/ci-test.txt"));
         singlePluginInstallAndRemove("elasticsearch/elasticsearch-transport-thrift/2.4.0", null);
     }
@@ -480,12 +477,12 @@ public class PluginManagerTests extends ElasticsearchIntegrationTest {
         singlePluginInstallAndRemove("groupid/plugintest", getPluginUrlForResource("plugin_without_folders.zip"));
     }
 
-    @Test(expected = ElasticsearchIllegalArgumentException.class)
+    @Test(expected = IllegalArgumentException.class)
     public void testRemovePlugin_NullName_ThrowsException() throws IOException {
         pluginManager(getPluginUrlForResource("plugin_single_folder.zip")).removePlugin(null);
     }
 
-    @Test(expected = ElasticsearchIllegalArgumentException.class)
+    @Test(expected = IllegalArgumentException.class)
     public void testRemovePluginWithURLForm() throws Exception {
         PluginManager pluginManager = pluginManager(null);
         pluginManager.removePlugin("file://whatever");
@@ -510,7 +507,7 @@ public class PluginManagerTests extends ElasticsearchIntegrationTest {
 
                 pluginManager(null).removePlugin(name);
                 fail("this plugin name [" + name + "] should not be allowed");
-            } catch (ElasticsearchIllegalArgumentException e) {
+            } catch (IllegalArgumentException e) {
                 // We expect that error
             }
         }
diff --git a/src/test/java/org/elasticsearch/plugins/SitePluginTests.java b/src/test/java/org/elasticsearch/plugins/SitePluginTests.java
index 7889ec9ead9..8106c6f60dd 100644
--- a/src/test/java/org/elasticsearch/plugins/SitePluginTests.java
+++ b/src/test/java/org/elasticsearch/plugins/SitePluginTests.java
@@ -34,6 +34,9 @@ import org.junit.Test;
 import java.net.URISyntaxException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
 
 import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
 import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
@@ -86,6 +89,34 @@ public class SitePluginTests extends ElasticsearchIntegrationTest {
         assertThat(response.getBody(), containsString("Dummy Site Plugin"));
     }
 
+    /**
+     * Test normalizing of path
+     */
+    @Test
+    public void testThatPathsAreNormalized() throws Exception {
+        // more info: https://www.owasp.org/index.php/Path_Traversal
+        List notFoundUris = new ArrayList<>();
+        notFoundUris.add("/_plugin/dummy/../../../../../log4j.properties");
+        notFoundUris.add("/_plugin/dummy/../../../../../%00log4j.properties");
+        notFoundUris.add("/_plugin/dummy/..%c0%af..%c0%af..%c0%af..%c0%af..%c0%aflog4j.properties");
+        notFoundUris.add("/_plugin/dummy/%2E%2E/%2E%2E/%2E%2E/%2E%2E/index.html");
+        notFoundUris.add("/_plugin/dummy/%2e%2e/%2e%2e/%2e%2e/%2e%2e/index.html");
+        notFoundUris.add("/_plugin/dummy/%2e%2e%2f%2e%2e%2f%2e%2e%2f%2e%2e%2findex.html");
+        notFoundUris.add("/_plugin/dummy/%2E%2E/%2E%2E/%2E%2E/%2E%2E/index.html");
+        notFoundUris.add("/_plugin/dummy/..\..\..\..\..\log4j.properties");
+
+        for (String uri : notFoundUris) {
+            HttpResponse response = httpClient().path(uri).execute();
+            String message = String.format(Locale.ROOT, "URI [%s] expected to be not found", uri);
+            assertThat(message, response.getStatusCode(), equalTo(RestStatus.NOT_FOUND.getStatus()));
+        }
+
+        // using relative path inside of the plugin should work
+        HttpResponse response = httpClient().path("/_plugin/dummy/dir1/../dir1/../index.html").execute();
+        assertThat(response.getStatusCode(), equalTo(RestStatus.OK.getStatus()));
+        assertThat(response.getBody(), containsString("Dummy Site Plugin"));
+    }
+
     /**
      * Test case for #4845: https://github.com/elasticsearch/elasticsearch/issues/4845
      * Serving _site plugins do not pick up on index.html for sub directories
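testThatPathsAreNormalized above probes the classic traversal encodings (raw "..", percent-encoded dots, overlong UTF-8, backslashes); the behavior it pins down is that a site-plugin path must still resolve under the plugin's _site root after decoding and normalization. A self-contained sketch of such a check, as an illustration of the idea rather than the actual handler code:

    import java.io.UnsupportedEncodingException;
    import java.net.URLDecoder;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    final class SitePathCheck {
        // True only when the decoded, normalized path stays under the site root.
        static boolean isUnderRoot(Path root, String rawPath) throws UnsupportedEncodingException {
            String decoded = URLDecoder.decode(rawPath, "UTF-8");   // "%2e%2e%2f" -> "../"
            Path resolved = root.resolve(decoded.replace('\\', '/')).normalize();
            return resolved.startsWith(root.normalize());
        }

        public static void main(String[] args) throws UnsupportedEncodingException {
            Path root = Paths.get("plugins/dummy/_site");
            System.out.println(isUnderRoot(root, "dir1/../dir1/../index.html"));      // true  -> 200
            System.out.println(isUnderRoot(root, "%2e%2e/%2e%2e/%2e%2e/index.html")); // false -> 404
        }
    }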
diff --git a/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java b/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java
index 989ea7194fc..579408366e9 100644
--- a/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java
+++ b/src/test/java/org/elasticsearch/rest/BytesRestResponseTests.java
@@ -20,11 +20,18 @@ package org.elasticsearch.rest;
 
 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.ShardSearchFailure;
+import org.elasticsearch.index.Index;
+import org.elasticsearch.index.query.TestQueryParsingException;
+import org.elasticsearch.search.SearchShardTarget;
 import org.elasticsearch.test.ElasticsearchTestCase;
 import org.elasticsearch.test.rest.FakeRestRequest;
+import org.elasticsearch.transport.RemoteTransportException;
 import org.junit.Test;
 
 import java.io.FileNotFoundException;
+import java.io.IOException;
 
 import static org.hamcrest.Matchers.contains;
 import static org.hamcrest.Matchers.containsString;
@@ -70,8 +77,8 @@ public class BytesRestResponseTests extends ElasticsearchTestCase {
         Throwable t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar"));
         BytesRestResponse response = new BytesRestResponse(channel, t);
         String text = response.content().toUtf8();
-        assertThat(text, containsString("ElasticsearchException[an error occurred reading data]"));
-        assertThat(text, containsString("FileNotFoundException[/foo/bar]"));
+        assertThat(text, containsString("{\"type\":\"exception\",\"reason\":\"an error occurred reading data\"}"));
+        assertThat(text, containsString("{\"type\":\"file_not_found_exception\",\"reason\":\"/foo/bar\"}"));
     }
 
     @Test
@@ -97,11 +104,28 @@ public class BytesRestResponseTests extends ElasticsearchTestCase {
         Throwable t = new Throwable("an error occurred reading data", new FileNotFoundException("/foo/bar"));
         BytesRestResponse response = new BytesRestResponse(channel, t);
         String text = response.content().toUtf8();
-        assertThat(text, containsString("\"error\":\"Throwable[an error occurred reading data]"));
-        assertThat(text, containsString("FileNotFoundException[/foo/bar]"));
+        assertThat(text, containsString("\"type\":\"throwable\",\"reason\":\"an error occurred reading data\""));
+        assertThat(text, containsString("{\"type\":\"file_not_found_exception\",\"reason\":\"/foo/bar\"}"));
         assertThat(text, containsString("\"error_trace\":{\"message\":\"an error occurred reading data\""));
     }
 
+    public void testGuessRootCause() throws IOException {
+        RestRequest request = new FakeRestRequest();
+        RestChannel channel = new DetailedExceptionRestChannel(request);
+        {
+            Throwable t = new ElasticsearchException("an error occurred reading data", new FileNotFoundException("/foo/bar"));
+            BytesRestResponse response = new BytesRestResponse(channel, t);
+            String text = response.content().toUtf8();
+            assertThat(text, containsString("{\"root_cause\":[{\"type\":\"exception\",\"reason\":\"an error occurred reading data\"}]"));
+        }
+        {
+            Throwable t = new FileNotFoundException("/foo/bar");
+            BytesRestResponse response = new BytesRestResponse(channel, t);
+            String text = response.content().toUtf8();
+            assertThat(text, containsString("{\"root_cause\":[{\"type\":\"file_not_found_exception\",\"reason\":\"/foo/bar\"}]"));
+        }
+    }
+
     @Test
     public void testNullThrowable() throws Exception {
         RestRequest request = new FakeRestRequest();
@@ -109,10 +133,25 @@ public class BytesRestResponseTests extends ElasticsearchTestCase {
         BytesRestResponse response = new BytesRestResponse(channel, null);
         String text = response.content().toUtf8();
-        assertThat(text, containsString("\"error\":\"Unknown\""));
+        assertThat(text, containsString("\"error\":\"unknown\""));
         assertThat(text, not(containsString("error_trace")));
     }
 
+    @Test
+    public void testConvert() throws IOException {
+        RestRequest request = new FakeRestRequest();
+        RestChannel channel = new DetailedExceptionRestChannel(request);
+        ShardSearchFailure failure = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null),
+                new SearchShardTarget("node_1", "foo", 1));
+        ShardSearchFailure failure1 = new ShardSearchFailure(new TestQueryParsingException(new Index("foo"), "foobar", null),
+                new SearchShardTarget("node_1", "foo", 2));
+        SearchPhaseExecutionException ex = new SearchPhaseExecutionException("search", "all shards failed", new ShardSearchFailure[] {failure, failure1});
+        BytesRestResponse response = new BytesRestResponse(channel, new RemoteTransportException("foo", ex));
+        String text = response.content().toUtf8();
+        String expected = "{\"error\":{\"root_cause\":[{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}],\"type\":\"search_phase_execution_exception\",\"reason\":\"all shards failed\",\"phase\":\"search\",\"grouped\":true,\"failed_shards\":[{\"shard\":1,\"index\":\"foo\",\"node\":\"node_1\",\"reason\":{\"type\":\"test_query_parsing_exception\",\"reason\":\"foobar\",\"index\":\"foo\"}}]},\"status\":400}";
+        assertEquals(expected.trim(), text.trim());
+    }
+
     private static class ExceptionWithHeaders extends ElasticsearchException.WithRestHeaders {
 
         ExceptionWithHeaders() {
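Read together, the new assertions describe the structured error format: a root_cause array plus nested {"type": ..., "reason": ...} objects, where "type" looks like the exception's simple class name in snake_case with the Elasticsearch prefix dropped. A guess at that naming convention, derived only from the strings asserted above and not from the implementation:

    // Hypothetical helper that reproduces the observed names.
    static String errorType(Class<? extends Throwable> clazz) {
        String name = clazz.getSimpleName().replaceFirst("^Elasticsearch", "");
        return name.replaceAll("([a-z0-9])([A-Z])", "$1_$2").toLowerCase(java.util.Locale.ROOT);
    }
    // errorType(org.elasticsearch.ElasticsearchException.class) -> "exception"
    // errorType(java.io.FileNotFoundException.class)            -> "file_not_found_exception"
    // errorType(SearchPhaseExecutionException.class)            -> "search_phase_execution_exception"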
diff --git a/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java b/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java
index e8501c40d1f..dc58961cc4b 100644
--- a/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java
+++ b/src/test/java/org/elasticsearch/rest/HeadersAndContextCopyClientTests.java
@@ -445,7 +445,7 @@ public class HeadersAndContextCopyClientTests extends ElasticsearchTestCase {
         }
 
         @Override
-        public void close() throws ElasticsearchException {
+        public void close() {
 
         }
diff --git a/src/test/java/org/elasticsearch/river/RiverTests.java b/src/test/java/org/elasticsearch/river/RiverTests.java
index 1d7c308d497..6587ca84c72 100644
--- a/src/test/java/org/elasticsearch/river/RiverTests.java
+++ b/src/test/java/org/elasticsearch/river/RiverTests.java
@@ -20,6 +20,8 @@ package org.elasticsearch.river;
 
 import com.google.common.base.Predicate;
+
+import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
 import org.elasticsearch.action.get.GetResponse;
 import org.elasticsearch.action.get.MultiGetItemResponse;
 import org.elasticsearch.action.get.MultiGetRequestBuilder;
@@ -29,15 +31,16 @@ import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.river.dummy.DummyRiverModule;
 import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
 import org.junit.Test;
 
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 
-import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
 import static org.hamcrest.Matchers.equalTo;
 
 @ClusterScope(scope = ElasticsearchIntegrationTest.Scope.SUITE)
+@AwaitsFix(bugUrl="occasionally fails apparently due to synchronous mappings updates")
 public class RiverTests extends ElasticsearchIntegrationTest {
 
     @Test
diff --git a/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java b/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java
index 861fdf7b1af..9b2b11fddad 100644
--- a/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java
+++ b/src/test/java/org/elasticsearch/routing/AliasResolveRoutingTests.java
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.routing;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.test.ElasticsearchIntegrationTest;
 import org.junit.Test;
@@ -65,14 +64,14 @@ public class AliasResolveRoutingTests extends ElasticsearchIntegrationTest {
         try {
             clusterService().state().metaData().resolveIndexRouting("1", "alias10");
             fail("should fail");
-        } catch (ElasticsearchIllegalArgumentException e) {
+        } catch (IllegalArgumentException e) {
             // all is well, we can't have two mappings, one provided, and one in the alias
         }
 
         try {
             clusterService().state().metaData().resolveIndexRouting(null, "alias0");
             fail("should fail");
-        } catch (ElasticsearchIllegalArgumentException ex) {
+        } catch (IllegalArgumentException ex) {
             // Expected
         }
     }
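For context on the resolveIndexRouting hunk above: "alias10" is an alias that carries its own routing value, so supplying a second, explicit routing is ambiguous and now fails with the stock JDK exception. A hedged sketch of the setup that creates such an alias (the alias-admin call is assumed from the same era's Java client API; it is not shown in this patch):

    // Hypothetical setup: the alias pins routing "0", so a later
    // resolveIndexRouting("1", "alias10") carries two conflicting routing values.
    client().admin().indices().prepareAliases()
            .addAliasAction(AliasAction.newAddAliasAction("test", "alias10").routing("0"))
            .get();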
diff --git a/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java b/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java
index 8b4fef56852..8fd37a7804d 100644
--- a/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java
+++ b/src/test/java/org/elasticsearch/routing/AliasRoutingTests.java
@@ -102,24 +102,6 @@ public class AliasRoutingTests extends ElasticsearchIntegrationTest {
             assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
             assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
         }
-
-        logger.info("--> deleting_by_query with 1 as routing, should not delete anything");
-        client().prepareDeleteByQuery().setQuery(matchAllQuery()).setRouting("1").execute().actionGet();
-        refresh();
-        for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
-            assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
-            assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(true));
-        }
-
-        logger.info("--> deleting_by_query with alias0, should delete");
-        client().prepareDeleteByQuery("alias0").setQuery(matchAllQuery()).execute().actionGet();
-        refresh();
-        for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
-            assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
-            assertThat(client().prepareGet("alias0", "type1", "1").execute().actionGet().isExists(), equalTo(false));
-        }
     }
 
     @Test
diff --git a/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java b/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java
index 55b796acb4a..cca1d2125a2 100644
--- a/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java
+++ b/src/test/java/org/elasticsearch/routing/SimpleRoutingTests.java
@@ -87,22 +87,6 @@ public class SimpleRoutingTests extends ElasticsearchIntegrationTest {
         for (int i = 0; i < 5; i++) {
             assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
         }
-
-        logger.info("--> deleting_by_query with 1 as routing, should not delete anything");
-        client().prepareDeleteByQuery().setQuery(matchAllQuery()).setRouting("1").execute().actionGet();
-        client().admin().indices().prepareRefresh().execute().actionGet();
-        for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
-            assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(true));
-        }
-
-        logger.info("--> deleting_by_query with , should delete");
-        client().prepareDeleteByQuery().setQuery(matchAllQuery()).setRouting("0").execute().actionGet();
-        client().admin().indices().prepareRefresh().execute().actionGet();
-        for (int i = 0; i < 5; i++) {
-            assertThat(client().prepareGet("test", "type1", "1").execute().actionGet().isExists(), equalTo(false));
-            assertThat(client().prepareGet("test", "type1", "1").setRouting("0").execute().actionGet().isExists(), equalTo(false));
-        }
     }
 
     public void testSimpleSearchRouting() {
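The two routing test files above lose their deleting_by_query sections because delete-by-query is leaving core in this patch. A client-side sketch of the usual replacement, scrolling the matching hits and bulk-deleting them, assuming the 1.x/2.x-era Java client API (an illustration, not the delete-by-query plugin itself):

    // Fragment in the style of the surrounding tests.
    SearchResponse scroll = client().prepareSearch("test")
            .setQuery(matchAllQuery())
            .setScroll(TimeValue.timeValueMinutes(1))
            .setSize(100)
            .get();
    while (scroll.getHits().getHits().length > 0) {
        BulkRequestBuilder bulk = client().prepareBulk();
        for (SearchHit hit : scroll.getHits()) {
            bulk.add(client().prepareDelete(hit.getIndex(), hit.getType(), hit.getId()));
        }
        bulk.get();
        scroll = client().prepareSearchScroll(scroll.getScrollId())
                .setScroll(TimeValue.timeValueMinutes(1)).get();
    }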
diff --git a/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java b/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java
index 76a9ff03e15..b2acd54a373 100644
--- a/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java
+++ b/src/test/java/org/elasticsearch/script/CustomScriptContextTests.java
@@ -21,7 +21,6 @@ package org.elasticsearch.script;
 
 import com.google.common.collect.ImmutableSet;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.inject.Module;
 import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.common.settings.Settings;
@@ -56,7 +55,7 @@ public class CustomScriptContextTests extends ElasticsearchIntegrationTest {
         for (String lang : LANG_SET) {
             for (ScriptService.ScriptType scriptType : ScriptService.ScriptType.values()) {
                 try {
-                    scriptService.compile(lang, "test", scriptType, new ScriptContext.Plugin(PLUGIN_NAME, "custom_globally_disabled_op"));
+                    scriptService.compile(new Script(lang, "test", scriptType, null), new ScriptContext.Plugin(PLUGIN_NAME, "custom_globally_disabled_op"));
                     fail("script compilation should have been rejected");
                 } catch(ScriptException e) {
                     assertThat(e.getMessage(), containsString("scripts of type [" + scriptType + "], operation [" + PLUGIN_NAME + "_custom_globally_disabled_op] and lang [" + lang + "] are disabled"));
@@ -65,20 +64,20 @@ public class CustomScriptContextTests extends ElasticsearchIntegrationTest {
         }
 
         try {
-            scriptService.compile("expression", "1", ScriptService.ScriptType.INLINE, new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op"));
+            scriptService.compile(new Script("expression", "1", ScriptService.ScriptType.INLINE, null), new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op"));
             fail("script compilation should have been rejected");
         } catch(ScriptException e) {
             assertThat(e.getMessage(), containsString("scripts of type [inline], operation [" + PLUGIN_NAME + "_custom_exp_disabled_op] and lang [expression] are disabled"));
         }
 
-        CompiledScript compiledScript = scriptService.compile("expression", "1", ScriptService.ScriptType.INLINE, randomFrom(ScriptContext.Standard.values()));
+        CompiledScript compiledScript = scriptService.compile(new Script("expression", "1", ScriptService.ScriptType.INLINE, null), randomFrom(ScriptContext.Standard.values()));
         assertThat(compiledScript, notNullValue());
 
-        compiledScript = scriptService.compile("mustache", "1", ScriptService.ScriptType.INLINE, new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op"));
+        compiledScript = scriptService.compile(new Script("mustache", "1", ScriptService.ScriptType.INLINE, null), new ScriptContext.Plugin(PLUGIN_NAME, "custom_exp_disabled_op"));
         assertThat(compiledScript, notNullValue());
 
         for (String lang : LANG_SET) {
-            compiledScript = scriptService.compile(lang, "1", ScriptService.ScriptType.INLINE, new ScriptContext.Plugin(PLUGIN_NAME, "custom_op"));
+            compiledScript = scriptService.compile(new Script(lang, "1", ScriptService.ScriptType.INLINE, null), new ScriptContext.Plugin(PLUGIN_NAME, "custom_op"));
             assertThat(compiledScript, notNullValue());
         }
     }
@@ -87,9 +86,9 @@ public class CustomScriptContextTests extends ElasticsearchIntegrationTest {
     public void testCompileNonRegisteredPluginContext() {
         ScriptService scriptService = internalCluster().getInstance(ScriptService.class);
         try {
-            scriptService.compile(randomFrom(LANG_SET.toArray(new String[LANG_SET.size()])), "test", randomFrom(ScriptService.ScriptType.values()), new ScriptContext.Plugin("test", "unknown"));
+            scriptService.compile(new Script(randomFrom(LANG_SET.toArray(new String[LANG_SET.size()])), "test", randomFrom(ScriptService.ScriptType.values()), null), new ScriptContext.Plugin("test", "unknown"));
             fail("script compilation should have been rejected");
-        } catch(ElasticsearchIllegalArgumentException e) {
+        } catch(IllegalArgumentException e) {
             assertThat(e.getMessage(), containsString("script context [test_unknown] not supported"));
         }
     }
@@ -98,14 +97,14 @@ public class CustomScriptContextTests extends ElasticsearchIntegrationTest {
     public void testCompileNonRegisteredScriptContext() {
         ScriptService scriptService = internalCluster().getInstance(ScriptService.class);
         try {
-            scriptService.compile(randomFrom(LANG_SET.toArray(new String[LANG_SET.size()])), "test", randomFrom(ScriptService.ScriptType.values()), new ScriptContext() {
+            scriptService.compile(new Script(randomFrom(LANG_SET.toArray(new String[LANG_SET.size()])), "test", randomFrom(ScriptService.ScriptType.values()), null), new ScriptContext() {
                 @Override
                 public String getKey() {
                     return "test";
                 }
             });
             fail("script compilation should have been rejected");
-        } catch(ElasticsearchIllegalArgumentException e) {
+        } catch(IllegalArgumentException e) {
             assertThat(e.getMessage(), containsString("script context [test] not supported"));
         }
     }
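Every ScriptService change in this patch follows the hunks above: compile and executable now take a single Script value object instead of loose (lang, source, type) arguments, and unsupported contexts surface as IllegalArgumentException. A before/after sketch using only the shapes visible in this diff (the trailing null is presumably the params argument):

    // Old signature, as removed above:
    // CompiledScript compiled = scriptService.compile("expression", "1",
    //         ScriptService.ScriptType.INLINE, ScriptContext.Standard.SEARCH);

    // New signature, as added above:
    CompiledScript compiled = scriptService.compile(
            new Script("expression", "1", ScriptService.ScriptType.INLINE, null),
            ScriptContext.Standard.SEARCH);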
diff --git a/src/test/java/org/elasticsearch/script/GroovySandboxScriptTests.java b/src/test/java/org/elasticsearch/script/GroovySandboxScriptTests.java
deleted file mode 100644
index 8e521a6f392..00000000000
--- a/src/test/java/org/elasticsearch/script/GroovySandboxScriptTests.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.script;
-
-import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.action.search.SearchPhaseExecutionException;
-import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.common.settings.ImmutableSettings;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.script.groovy.GroovyScriptEngineService;
-import org.elasticsearch.test.ElasticsearchIntegrationTest;
-import org.junit.Test;
-
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
-import static org.hamcrest.CoreMatchers.equalTo;
-
-/**
- * Tests for the Groovy scripting sandbox
- */
-@ElasticsearchIntegrationTest.ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numDataNodes = 0)
-public class GroovySandboxScriptTests extends ElasticsearchIntegrationTest {
-
-    @Test
-    public void testSandboxedGroovyScript() throws Exception {
-        int nodes = randomIntBetween(1, 3);
-        Settings nodeSettings = ImmutableSettings.builder()
-                .put(GroovyScriptEngineService.GROOVY_SCRIPT_SANDBOX_ENABLED, true)
-                .build();
-        internalCluster().startNodesAsync(nodes, nodeSettings).get();
-        client().admin().cluster().prepareHealth().setWaitForNodes(nodes + "").get();
-
-        client().prepareIndex("test", "doc", "1").setSource("foo", 5).setRefresh(true).get();
-
-        // Plain test
-        testSuccess("");
-        // List
-        testSuccess("def list = [doc['foo'].value, 3, 4]; def v = list.get(1); list.add(10)");
-        // Ranges
-        testSuccess("def range = 1..doc['foo'].value; def v = range.get(0)");
-        // Maps
-        testSuccess("def v = doc['foo'].value; def m = [:]; m.put(\\\"value\\\", v)");
-        // Times
-        testSuccess("def t = Instant.now().getMillis()");
-        // GroovyCollections
-        testSuccess("def n = [1,2,3]; GroovyCollections.max(n)");
-
-        // Fail cases
-        testFailure("pr = Runtime.getRuntime().exec(\\\"touch /tmp/gotcha\\\"); pr.waitFor()",
-                "Method calls not allowed on [java.lang.Runtime]");
-
-        testFailure("d = new DateTime(); d.getClass().getDeclaredMethod(\\\"plus\\\").setAccessible(true)",
-                "Expression [MethodCallExpression] is not allowed: d.getClass()");
-
-        testFailure("d = new DateTime(); d.\\\"${'get' + 'Class'}\\\"()." +
-                "\\\"${'getDeclared' + 'Method'}\\\"(\\\"now\\\").\\\"${'set' + 'Accessible'}\\\"(false)",
-                "Expression [MethodCallExpression] is not allowed: d.$(get + Class)().$(getDeclared + Method)(now).$(set + Accessible)(false)");
-
-        testFailure("Class.forName(\\\"DateTime\\\").getDeclaredMethod(\\\"plus\\\").setAccessible(true)",
-                "Expression [MethodCallExpression] is not allowed: java.lang.Class.forName(DateTime)");
-
-        testFailure("Eval.me('2 + 2')", "Method calls not allowed on [groovy.util.Eval]");
-
-        testFailure("Eval.x(5, 'x + 2')", "Method calls not allowed on [groovy.util.Eval]");
-
-        testFailure("t = new java.util.concurrent.ThreadPoolExecutor(2, 2, 0L, TimeUnit.MILLISECONDS, " +
-                "new java.util.concurrent.LinkedBlockingQueue()); t.execute({ println 5 })",
-                "Expression [ConstructorCallExpression] is not allowed: new java.util.concurrent.ThreadPoolExecutor");
-
-        testFailure("d = new Date(); java.lang.reflect.Field f = Date.class.getDeclaredField(\\\"fastTime\\\");" +
-                " f.setAccessible(true); f.get(\\\"fastTime\\\")",
-                "Method calls not allowed on [java.lang.reflect.Field]");
-
-        testFailure("t = new Thread({ println 3 }); t.start(); t.join()",
not allowed: [doc[foo].value, 3, 4].isEmpty()"); - testFailure("[doc['foo'].value, 3, 4].size()", - "Expression [MethodCallExpression] is not allowed: [doc[foo].value, 3, 4].size()"); - } - - public void testSuccess(String script) { - logger.info("--> script: " + script); - SearchResponse resp = client().prepareSearch("test") - .setSource("{\"query\": {\"match_all\": {}}," + - "\"sort\":{\"_script\": {\"script\": \""+ script + - "; doc['foo'].value + 2\", \"type\": \"number\", \"lang\": \"groovy\"}}}").get(); - assertNoFailures(resp); - assertThat(resp.getHits().getAt(0).getSortValues(), equalTo(new Object[]{7.0})); - } - - public void testFailure(String script, String failMessage) { - logger.info("--> script: " + script); - try { - client().prepareSearch("test") - .setSource("{\"query\": {\"match_all\": {}}," + - "\"sort\":{\"_script\": {\"script\": \""+ script + - "; doc['foo'].value + 2\", \"type\": \"number\", \"lang\": \"groovy\"}}}").get(); - fail("script: " + script + " failed to be caught be the sandbox!"); - } catch (SearchPhaseExecutionException e) { - String msg = ExceptionsHelper.detailedMessage(ExceptionsHelper.unwrapCause(e)); - assertThat("script failed, but with incorrect message: " + msg, msg.contains(failMessage), equalTo(true)); - } - } -} diff --git a/src/test/java/org/elasticsearch/script/GroovyScriptTests.java b/src/test/java/org/elasticsearch/script/GroovyScriptTests.java index 801f4b36e40..657d2bc361e 100644 --- a/src/test/java/org/elasticsearch/script/GroovyScriptTests.java +++ b/src/test/java/org/elasticsearch/script/GroovyScriptTests.java @@ -76,12 +76,12 @@ public class GroovyScriptTests extends ElasticsearchIntegrationTest { client().prepareSearch("test").setQuery(constantScoreQuery(scriptFilter("1 == not_found").lang(GroovyScriptEngineService.NAME))).get(); fail("should have thrown an exception"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should not contained NotSerializableTransportException", - ExceptionsHelper.detailedMessage(e).contains("NotSerializableTransportException"), equalTo(false)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained GroovyScriptExecutionException", - ExceptionsHelper.detailedMessage(e).contains("GroovyScriptExecutionException"), equalTo(true)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained not_found", - ExceptionsHelper.detailedMessage(e).contains("No such property: not_found"), equalTo(true)); + assertThat(e.toString()+ "should not contained NotSerializableTransportException", + e.toString().contains("NotSerializableTransportException"), equalTo(false)); + assertThat(e.toString()+ "should have contained GroovyScriptExecutionException", + e.toString().contains("GroovyScriptExecutionException"), equalTo(true)); + assertThat(e.toString()+ "should have contained not_found", + e.toString().contains("No such property: not_found"), equalTo(true)); } try { @@ -89,12 +89,12 @@ public class GroovyScriptTests extends ElasticsearchIntegrationTest { scriptFilter("assert false").lang("groovy"))).get(); fail("should have thrown an exception"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should not contained NotSerializableTransportException", - ExceptionsHelper.detailedMessage(e).contains("NotSerializableTransportException"), equalTo(false)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained GroovyScriptExecutionException", - 
-                    ExceptionsHelper.detailedMessage(e).contains("GroovyScriptExecutionException"), equalTo(true));
-            assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained not_found",
-                    ExceptionsHelper.detailedMessage(e).contains("No such property: not_found"), equalTo(true));
+            assertThat(e.toString()+ "should not contained NotSerializableTransportException",
+                    e.toString().contains("NotSerializableTransportException"), equalTo(false));
+            assertThat(e.toString()+ "should have contained GroovyScriptExecutionException",
+                    e.toString().contains("GroovyScriptExecutionException"), equalTo(true));
+            assertThat(e.toString()+ "should have contained not_found",
+                    e.toString().contains("No such property: not_found"), equalTo(true));
         }
 
         try {
@@ -89,12 +89,12 @@ public class GroovyScriptTests extends ElasticsearchIntegrationTest {
                     scriptFilter("assert false").lang("groovy"))).get();
             fail("should have thrown an exception");
         } catch (SearchPhaseExecutionException e) {
-            assertThat(ExceptionsHelper.detailedMessage(e) + "should not contained NotSerializableTransportException",
-                    ExceptionsHelper.detailedMessage(e).contains("NotSerializableTransportException"), equalTo(false));
-            assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained GroovyScriptExecutionException",
-                    ExceptionsHelper.detailedMessage(e).contains("GroovyScriptExecutionException"), equalTo(true));
-            assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained an assert error",
-                    ExceptionsHelper.detailedMessage(e).contains("PowerAssertionError[assert false"), equalTo(true));
+            assertThat(e.toString()+ "should not contained NotSerializableTransportException",
+                    e.toString().contains("NotSerializableTransportException"), equalTo(false));
+            assertThat(e.toString()+ "should have contained GroovyScriptExecutionException",
+                    e.toString().contains("GroovyScriptExecutionException"), equalTo(true));
+            assertThat(e.toString()+ "should have contained an assert error",
+                    e.toString().contains("PowerAssertionError[assert false"), equalTo(true));
         }
     }
diff --git a/src/test/java/org/elasticsearch/script/IndexLookupTests.java b/src/test/java/org/elasticsearch/script/IndexLookupTests.java
index 85940106a35..cc1900a3fb2 100644
--- a/src/test/java/org/elasticsearch/script/IndexLookupTests.java
+++ b/src/test/java/org/elasticsearch/script/IndexLookupTests.java
@@ -178,8 +178,8 @@ public class IndexLookupTests extends ElasticsearchIntegrationTest {
             client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).addScriptField("tvtest", script).execute().actionGet();
         } catch (SearchPhaseExecutionException e) {
             assertThat(
-                    "got: " + e.getDetailedMessage(),
-                    e.getDetailedMessage()
+                    "got: " + e.toString(),
+                    e.toString()
                             .indexOf(
                                     "You must call get with all required flags! Instead of _index['int_payload_field'].get('b', _FREQUENCIES) and _index['int_payload_field'].get('b', _POSITIONS) call _index['int_payload_field'].get('b', _FREQUENCIES | _POSITIONS) once]"),
                     Matchers.greaterThan(-1));
@@ -581,8 +581,8 @@ public class IndexLookupTests extends ElasticsearchIntegrationTest {
             }
         } catch (SearchPhaseExecutionException ex) {
             assertThat(
-                    "got " + ex.getDetailedMessage(),
-                    ex.getDetailedMessage().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitly."),
+                    "got " + ex.toString(),
+                    ex.toString().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitly."),
                     Matchers.greaterThan(-1));
         }
     }
diff --git a/src/test/java/org/elasticsearch/script/IndexedScriptTests.java b/src/test/java/org/elasticsearch/script/IndexedScriptTests.java
index ac44e4d6dbc..2eb881156bd 100644
--- a/src/test/java/org/elasticsearch/script/IndexedScriptTests.java
+++ b/src/test/java/org/elasticsearch/script/IndexedScriptTests.java
@@ -158,20 +158,20 @@ public class IndexedScriptTests extends ElasticsearchIntegrationTest {
             fail("update script should have been rejected");
         } catch(Exception e) {
             assertThat(e.getMessage(), containsString("failed to execute script"));
-            assertThat(ExceptionsHelper.detailedMessage(e), containsString("scripts of type [indexed], operation [update] and lang [expression] are disabled"));
+            assertThat(e.getCause().toString(), containsString("scripts of type [indexed], operation [update] and lang [expression] are disabled"));
         }
         try {
             String query = "{ \"script_fields\" : { \"test1\" : { \"script_id\" : \"script1\", \"lang\":\"expression\" }}}";
             client().prepareSearch().setSource(query).setIndices("test").setTypes("scriptTest").get();
             fail("search script should have been rejected");
         } catch(Exception e) {
-            assertThat(e.getMessage(), containsString("scripts of type [indexed], operation [search] and lang [expression] are disabled"));
+            assertThat(e.toString(), containsString("scripts of type [indexed], operation [search] and lang [expression] are disabled"));
         }
         try {
            String source = "{\"aggs\": {\"test\": { \"terms\" : { \"script_id\":\"script1\", \"script_lang\":\"expression\" } } } }";
            client().prepareSearch("test").setSource(source).get();
         } catch(Exception e) {
-            assertThat(e.getMessage(), containsString("scripts of type [indexed], operation [aggs] and lang [expression] are disabled"));
+            assertThat(e.toString(), containsString("scripts of type [indexed], operation [aggs] and lang [expression] are disabled"));
         }
     }
 }
diff --git a/src/test/java/org/elasticsearch/script/NativeScriptTests.java b/src/test/java/org/elasticsearch/script/NativeScriptTests.java
index 1385fa7b54d..951f605801e 100644
--- a/src/test/java/org/elasticsearch/script/NativeScriptTests.java
+++ b/src/test/java/org/elasticsearch/script/NativeScriptTests.java
@@ -28,7 +28,6 @@ import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsModule;
 import org.elasticsearch.env.Environment;
-import org.elasticsearch.node.settings.NodeSettingsService;
 import org.elasticsearch.script.ScriptService.ScriptType;
 import org.elasticsearch.test.ElasticsearchTestCase;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -59,7 +58,7 @@ public class NativeScriptTests extends ElasticsearchTestCase {
 
         ScriptService scriptService = injector.getInstance(ScriptService.class);
 
-        ExecutableScript executable = scriptService.executable(NativeScriptEngineService.NAME, "my", ScriptType.INLINE, ScriptContext.Standard.SEARCH, null);
+        ExecutableScript executable = scriptService.executable(new Script(NativeScriptEngineService.NAME, "my", ScriptType.INLINE, null), ScriptContext.Standard.SEARCH);
         assertThat(executable.run().toString(), equalTo("test"));
         terminate(injector.getInstance(ThreadPool.class));
     }
@@ -81,10 +80,10 @@ public class NativeScriptTests extends ElasticsearchTestCase {
         nativeScriptFactoryMap.put("my", new MyNativeScriptFactory());
         Set scriptEngineServices = ImmutableSet.of(new NativeScriptEngineService(settings, nativeScriptFactoryMap));
         ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Lists.newArrayList());
-        ScriptService scriptService = new ScriptService(settings, environment, scriptEngineServices, resourceWatcherService, new NodeSettingsService(settings), scriptContextRegistry);
+        ScriptService scriptService = new ScriptService(settings, environment, scriptEngineServices, resourceWatcherService, scriptContextRegistry);
 
         for (ScriptContext scriptContext : scriptContextRegistry.scriptContexts()) {
-            assertThat(scriptService.compile(NativeScriptEngineService.NAME, "my", ScriptType.INLINE, scriptContext), notNullValue());
+            assertThat(scriptService.compile(new Script(NativeScriptEngineService.NAME, "my", ScriptType.INLINE, null), scriptContext), notNullValue());
         }
     }
diff --git a/src/test/java/org/elasticsearch/script/OnDiskScriptTests.java b/src/test/java/org/elasticsearch/script/OnDiskScriptTests.java
index 78a28520d4a..88471f712b4 100644
--- a/src/test/java/org/elasticsearch/script/OnDiskScriptTests.java
+++ b/src/test/java/org/elasticsearch/script/OnDiskScriptTests.java
@@ -107,7 +107,7 @@ public class OnDiskScriptTests extends ElasticsearchIntegrationTest {
             client().prepareSearch("test").setSource(source).get();
             fail("aggs script should have been rejected");
         } catch(Exception e) {
-            assertThat(ExceptionsHelper.detailedMessage(e), containsString("scripts of type [file], operation [aggs] and lang [expression] are disabled"));
+            assertThat(e.toString(), containsString("scripts of type [file], operation [aggs] and lang [expression] are disabled"));
         }
 
         String query = "{ \"query\" : { \"match_all\": {}} , \"script_fields\" : { \"test1\" : { \"script_file\" : \"script1\", \"lang\":\"expression\" }}, size:1}";
@@ -128,21 +128,21 @@ public class OnDiskScriptTests extends ElasticsearchIntegrationTest {
             client().prepareSearch("test").setSource(source).get();
             fail("aggs script should have been rejected");
         } catch(Exception e) {
-            assertThat(ExceptionsHelper.detailedMessage(e), containsString("scripts of type [file], operation [aggs] and lang [mustache] are disabled"));
+            assertThat(e.toString(), containsString("scripts of type [file], operation [aggs] and lang [mustache] are disabled"));
         }
         String query = "{ \"query\" : { \"match_all\": {}} , \"script_fields\" : { \"test1\" : { \"script_file\" : \"script1\", \"lang\":\"mustache\" }}, size:1}";
         try {
             client().prepareSearch().setSource(query).setIndices("test").setTypes("scriptTest").get();
             fail("search script should have been rejected");
         } catch(Exception e) {
-            assertThat(ExceptionsHelper.detailedMessage(e), containsString("scripts of type [file], operation [search] and lang [mustache] are disabled"));
+            assertThat(e.toString(), containsString("scripts of type [file], operation [search] and lang [mustache] are disabled"));
         }
         try {
             client().prepareUpdate("test", "scriptTest", "1").setScript("script1", ScriptService.ScriptType.FILE).setScriptLang(MustacheScriptEngineService.NAME).get();
             fail("update script should have been rejected");
         } catch(Exception e) {
             assertThat(e.getMessage(), containsString("failed to execute script"));
-            assertThat(ExceptionsHelper.detailedMessage(e), containsString("scripts of type [file], operation [update] and lang [mustache] are disabled"));
+            assertThat(e.getCause().toString(), containsString("scripts of type [file], operation [update] and lang [mustache] are disabled"));
         }
     }
 }
diff --git a/src/test/java/org/elasticsearch/script/SandboxDisabledTests.java b/src/test/java/org/elasticsearch/script/SandboxDisabledTests.java
deleted file mode 100644
index 0a759c07b98..00000000000
--- a/src/test/java/org/elasticsearch/script/SandboxDisabledTests.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.script;
-
-import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.common.settings.ImmutableSettings;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.script.groovy.GroovyScriptEngineService;
-import org.elasticsearch.test.ElasticsearchIntegrationTest;
-import org.junit.Test;
-
-import static org.hamcrest.Matchers.containsString;
-
-/**
- * Test that a system where the sandbox is disabled while dynamic scripting is
- * also disabled does not allow a script to be sent
- */
-@ElasticsearchIntegrationTest.ClusterScope(scope=ElasticsearchIntegrationTest.Scope.SUITE)
-public class SandboxDisabledTests extends ElasticsearchIntegrationTest {
-
-    @Override
-    protected Settings nodeSettings(int nodeOrdinal) {
-        return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal))
-                .put(GroovyScriptEngineService.GROOVY_SCRIPT_SANDBOX_ENABLED, false)
-                .put("script.inline", false).build();
-    }
-
-    @Test
-    public void testScriptingDisabledWhileSandboxDisabled() {
-        client().prepareIndex("test", "doc", "1").setSource("foo", 5).setRefresh(true).get();
-        try {
-            client().prepareSearch("test")
-                    .setSource("{\"query\": {\"match_all\": {}}," +
-                            "\"sort\":{\"_script\": {\"script\": \"doc['foo'].value + 2\", \"type\": \"number\", \"lang\": \"groovy\"}}}").get();
-            fail("shards should fail because the sandbox and dynamic scripting are disabled");
-        } catch (Exception e) {
-            assertThat(ExceptionsHelper.detailedMessage(e), containsString("scripts of type [inline], operation [search] and lang [groovy] are disabled"));
-        }
-    }
-}
diff --git a/src/test/java/org/elasticsearch/script/ScriptContextRegistryTests.java b/src/test/java/org/elasticsearch/script/ScriptContextRegistryTests.java
index bfc775e449a..b96391682fe 100644
--- a/src/test/java/org/elasticsearch/script/ScriptContextRegistryTests.java
+++ b/src/test/java/org/elasticsearch/script/ScriptContextRegistryTests.java
@@ -20,7 +20,6 @@ package org.elasticsearch.script;
 
 import com.google.common.collect.Lists;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.test.ElasticsearchTestCase;
 import org.hamcrest.Matchers;
 import org.junit.Test;
@@ -36,7 +35,7 @@ public class ScriptContextRegistryTests extends ElasticsearchTestCase {
                 //try to register a prohibited script context
                 new ScriptContextRegistry(Lists.newArrayList(new ScriptContext.Plugin("test", rejectedContext)));
                 fail("ScriptContextRegistry initialization should have failed");
-            } catch(ElasticsearchIllegalArgumentException e) {
+            } catch(IllegalArgumentException e) {
                 assertThat(e.getMessage(), Matchers.containsString("[" + rejectedContext + "] is a reserved name, it cannot be registered as a custom script context"));
             }
         }
@@ -49,18 +48,18 @@ public class ScriptContextRegistryTests extends ElasticsearchTestCase {
                 //try to register a prohibited script context
                 new ScriptContextRegistry(Lists.newArrayList(new ScriptContext.Plugin(rejectedContext, "test")));
                 fail("ScriptContextRegistry initialization should have failed");
-            } catch(ElasticsearchIllegalArgumentException e) {
+            } catch(IllegalArgumentException e) {
                 assertThat(e.getMessage(), Matchers.containsString("[" + rejectedContext + "] is a reserved name, it cannot be registered as a custom script context"));
             }
         }
     }
 
-    @Test(expected = ElasticsearchIllegalArgumentException.class)
+    @Test(expected = IllegalArgumentException.class)
     public void testValidateCustomScriptContextsEmptyPluginName() throws IOException {
         new ScriptContext.Plugin(randomBoolean() ? null : "", "test");
     }
 
-    @Test(expected = ElasticsearchIllegalArgumentException.class)
+    @Test(expected = IllegalArgumentException.class)
     public void testValidateCustomScriptContextsEmptyOperation() throws IOException {
         new ScriptContext.Plugin("test", randomBoolean() ? null : "");
     }
@@ -71,7 +70,7 @@ public class ScriptContextRegistryTests extends ElasticsearchTestCase {
             //try to register a prohibited script context
             new ScriptContextRegistry(Lists.newArrayList(new ScriptContext.Plugin("testplugin", "test"), new ScriptContext.Plugin("testplugin", "test")));
             fail("ScriptContextRegistry initialization should have failed");
-        } catch(ElasticsearchIllegalArgumentException e) {
+        } catch(IllegalArgumentException e) {
             assertThat(e.getMessage(), Matchers.containsString("script context [testplugin_test] cannot be registered twice"));
         }
     }
diff --git a/src/test/java/org/elasticsearch/script/ScriptModesTests.java b/src/test/java/org/elasticsearch/script/ScriptModesTests.java
index 119a5b91106..f0051d188bb 100644
--- a/src/test/java/org/elasticsearch/script/ScriptModesTests.java
+++ b/src/test/java/org/elasticsearch/script/ScriptModesTests.java
@@ -20,7 +20,6 @@ package org.elasticsearch.script;
 
 import com.google.common.collect.*;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.script.ScriptService.ScriptType;
@@ -106,7 +105,7 @@ public class ScriptModesTests extends ElasticsearchTestCase {
         assertScriptModesAllOps(ScriptMode.SANDBOX, ALL_LANGS, ScriptType.INDEXED, ScriptType.INLINE);
     }
 
-    @Test(expected = ElasticsearchIllegalArgumentException.class)
+    @Test(expected = IllegalArgumentException.class)
     public void testMissingSetting() {
         assertAllSettingsWereChecked = false;
         this.scriptModes = new ScriptModes(scriptEngines, scriptContextRegistry, ImmutableSettings.EMPTY);
diff --git a/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/src/test/java/org/elasticsearch/script/ScriptServiceTests.java
index 0187d22c5d4..69aa8685835 100644
--- a/src/test/java/org/elasticsearch/script/ScriptServiceTests.java
+++ b/src/test/java/org/elasticsearch/script/ScriptServiceTests.java
@@ -20,13 +20,11 @@ package org.elasticsearch.script;
 
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Maps;
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.settings.ImmutableSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.env.Environment;
-import org.elasticsearch.node.settings.NodeSettingsService;
 import org.elasticsearch.script.ScriptService.ScriptType;
 import org.elasticsearch.script.expression.ExpressionScriptEngineService;
 import org.elasticsearch.script.groovy.GroovyScriptEngineService;
@@ -103,7 +101,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase {
     private void buildScriptService(Settings additionalSettings) throws IOException {
         Settings finalSettings = ImmutableSettings.builder().put(baseSettings).put(additionalSettings).build();
         Environment environment = new Environment(finalSettings);
-        scriptService = new ScriptService(finalSettings, environment, scriptEngineServices, resourceWatcherService, new NodeSettingsService(finalSettings), scriptContextRegistry) {
+        scriptService = new ScriptService(finalSettings, environment, scriptEngineServices, resourceWatcherService, scriptContextRegistry) {
             @Override
             String getScriptFromIndex(String scriptLang, String id) {
                 //mock the script that gets retrieved from an index
@@ -117,7 +115,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase {
         try {
             buildScriptService(ImmutableSettings.builder().put(ScriptService.DISABLE_DYNAMIC_SCRIPTING_SETTING, randomUnicodeOfLength(randomIntBetween(1, 10))).build());
             fail("script service should have thrown exception due to non supported script.disable_dynamic setting");
-        } catch(ElasticsearchIllegalArgumentException e) {
+        } catch(IllegalArgumentException e) {
             assertThat(e.getMessage(), containsString(ScriptService.DISABLE_DYNAMIC_SCRIPTING_SETTING + " is not a supported setting, replace with fine-grained script settings"));
         }
     }
@@ -133,7 +131,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase {
         resourceWatcherService.notifyNow();
 
         logger.info("--> verify that file with extension was correctly processed");
-        CompiledScript compiledScript = scriptService.compile("test", "test_script", ScriptType.FILE, ScriptContext.Standard.SEARCH);
+        CompiledScript compiledScript = scriptService.compile(new Script("test", "test_script", ScriptType.FILE, null), ScriptContext.Standard.SEARCH);
         assertThat(compiledScript.compiled(), equalTo((Object) "compiled_test_file"));
 
         logger.info("--> delete both files");
@@ -143,9 +141,9 @@ public class ScriptServiceTests extends ElasticsearchTestCase {
 
         logger.info("--> verify that file with extension was correctly removed");
         try {
-            scriptService.compile("test", "test_script", ScriptType.FILE, ScriptContext.Standard.SEARCH);
+            scriptService.compile(new Script("test", "test_script", ScriptType.FILE, null), ScriptContext.Standard.SEARCH);
             fail("the script test_script should no longer exist");
-        } catch (ElasticsearchIllegalArgumentException ex) {
+        } catch (IllegalArgumentException ex) {
             assertThat(ex.getMessage(), containsString("Unable to find on disk script test_script"));
         }
     }
@@ -154,17 +152,17 @@ public class ScriptServiceTests extends ElasticsearchTestCase {
     public void testScriptsSameNameDifferentLanguage() throws IOException {
         buildScriptService(ImmutableSettings.EMPTY);
         createFileScripts("groovy", "expression");
-        CompiledScript groovyScript = scriptService.compile(GroovyScriptEngineService.NAME, "file_script", ScriptType.FILE, randomFrom(scriptContexts));
+        CompiledScript groovyScript = scriptService.compile(new Script(GroovyScriptEngineService.NAME, "file_script", ScriptType.FILE, null), randomFrom(scriptContexts));
assertThat(groovyScript.lang(), equalTo(GroovyScriptEngineService.NAME)); - CompiledScript expressionScript = scriptService.compile(ExpressionScriptEngineService.NAME, "file_script", ScriptType.FILE, randomFrom(scriptContexts)); + CompiledScript expressionScript = scriptService.compile(new Script(ExpressionScriptEngineService.NAME, "file_script", ScriptType.FILE, null), randomFrom(scriptContexts)); assertThat(expressionScript.lang(), equalTo(ExpressionScriptEngineService.NAME)); } @Test public void testInlineScriptCompiledOnceMultipleLangAcronyms() throws IOException { buildScriptService(ImmutableSettings.EMPTY); - CompiledScript compiledScript1 = scriptService.compile("test", "script", ScriptType.INLINE, randomFrom(scriptContexts)); - CompiledScript compiledScript2 = scriptService.compile("test2", "script", ScriptType.INLINE, randomFrom(scriptContexts)); + CompiledScript compiledScript1 = scriptService.compile(new Script("test", "script", ScriptType.INLINE, null), randomFrom(scriptContexts)); + CompiledScript compiledScript2 = scriptService.compile(new Script("test2", "script", ScriptType.INLINE, null), randomFrom(scriptContexts)); assertThat(compiledScript1, sameInstance(compiledScript2)); } @@ -172,8 +170,8 @@ public class ScriptServiceTests extends ElasticsearchTestCase { public void testFileScriptCompiledOnceMultipleLangAcronyms() throws IOException { buildScriptService(ImmutableSettings.EMPTY); createFileScripts("test"); - CompiledScript compiledScript1 = scriptService.compile("test", "file_script", ScriptType.FILE, randomFrom(scriptContexts)); - CompiledScript compiledScript2 = scriptService.compile("test2", "file_script", ScriptType.FILE, randomFrom(scriptContexts)); + CompiledScript compiledScript1 = scriptService.compile(new Script("test", "file_script", ScriptType.FILE, null), randomFrom(scriptContexts)); + CompiledScript compiledScript2 = scriptService.compile(new Script("test2", "file_script", ScriptType.FILE, null), randomFrom(scriptContexts)); assertThat(compiledScript1, sameInstance(compiledScript2)); } @@ -350,9 +348,9 @@ public class ScriptServiceTests extends ElasticsearchTestCase { for (ScriptEngineService scriptEngineService : scriptEngineServices) { for (String type : scriptEngineService.types()) { try { - scriptService.compile(type, "test", randomFrom(ScriptType.values()), new ScriptContext.Plugin(pluginName, unknownContext)); + scriptService.compile(new Script(type, "test", randomFrom(ScriptType.values()), null), new ScriptContext.Plugin(pluginName, unknownContext)); fail("script compilation should have been rejected"); - } catch(ElasticsearchIllegalArgumentException e) { + } catch(IllegalArgumentException e) { assertThat(e.getMessage(), containsString("script context [" + pluginName + "_" + unknownContext + "] not supported")); } } @@ -369,7 +367,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase { private void assertCompileRejected(String lang, String script, ScriptType scriptType, ScriptContext scriptContext) { try { - scriptService.compile(lang, script, scriptType, scriptContext); + scriptService.compile(new Script(lang, script, scriptType, null), scriptContext); fail("compile should have been rejected for lang [" + lang + "], script_type [" + scriptType + "], scripted_op [" + scriptContext + "]"); } catch(ScriptException e) { //all good @@ -377,7 +375,7 @@ public class ScriptServiceTests extends ElasticsearchTestCase { } private void assertCompileAccepted(String lang, String script, ScriptType scriptType, ScriptContext scriptContext) { - 
assertThat(scriptService.compile(lang, script, scriptType, scriptContext), notNullValue()); + assertThat(scriptService.compile(new Script(lang, script, scriptType, null), scriptContext), notNullValue()); } public static class TestEngineService implements ScriptEngineService { diff --git a/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java b/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java index 9ef64d82477..1f04063a42d 100644 --- a/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java +++ b/src/test/java/org/elasticsearch/script/expression/ExpressionScriptTests.java @@ -61,6 +61,15 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { } public void testBasic() throws Exception { + createIndex("test"); + ensureGreen("test"); + client().prepareIndex("test", "doc", "1").setSource("foo", 4).setRefresh(true).get(); + SearchResponse rsp = buildRequest("doc['foo'] + 1").get(); + assertEquals(1, rsp.getHits().getTotalHits()); + assertEquals(5.0, rsp.getHits().getAt(0).field("foo").getValue()); + } + + public void testBasicUsingDotValue() throws Exception { createIndex("test"); ensureGreen("test"); client().prepareIndex("test", "doc", "1").setSource("foo", 4).setRefresh(true).get(); @@ -89,13 +98,56 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { assertEquals("2", hits.getAt(2).getId()); } + public void testDateMethods() throws Exception { + ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("doc", "date0", "type=date", "date1", "type=date")); + ensureGreen("test"); + indexRandom(true, + client().prepareIndex("test", "doc", "1").setSource("date0", "2015-04-28T04:02:07Z", "date1", "1985-09-01T23:11:01Z"), + client().prepareIndex("test", "doc", "2").setSource("date0", "2013-12-25T11:56:45Z", "date1", "1983-10-13T23:15:00Z")); + SearchResponse rsp = buildRequest("doc['date0'].getSeconds() - doc['date0'].getMinutes()").get(); + assertEquals(2, rsp.getHits().getTotalHits()); + SearchHits hits = rsp.getHits(); + assertEquals(5.0, hits.getAt(0).field("foo").getValue()); + assertEquals(-11.0, hits.getAt(1).field("foo").getValue()); + rsp = buildRequest("doc['date0'].getHourOfDay() + doc['date1'].getDayOfMonth()").get(); + assertEquals(2, rsp.getHits().getTotalHits()); + hits = rsp.getHits(); + assertEquals(5.0, hits.getAt(0).field("foo").getValue()); + assertEquals(24.0, hits.getAt(1).field("foo").getValue()); + rsp = buildRequest("doc['date1'].getMonth() + 1").get(); + assertEquals(2, rsp.getHits().getTotalHits()); + hits = rsp.getHits(); + assertEquals(9.0, hits.getAt(0).field("foo").getValue()); + assertEquals(10.0, hits.getAt(1).field("foo").getValue()); + rsp = buildRequest("doc['date1'].getYear()").get(); + assertEquals(2, rsp.getHits().getTotalHits()); + hits = rsp.getHits(); + assertEquals(1985.0, hits.getAt(0).field("foo").getValue()); + assertEquals(1983.0, hits.getAt(1).field("foo").getValue()); + } + + public void testInvalidDateMethodCall() throws Exception { + ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("doc", "double", "type=double")); + ensureGreen("test"); + indexRandom(true, client().prepareIndex("test", "doc", "1").setSource("double", "178000000.0")); + try { + buildRequest("doc['double'].getYear()").get(); + fail(); + } catch (SearchPhaseExecutionException e) { + assertThat(e.toString() + "should have contained IllegalArgumentException", + e.toString().contains("IllegalArgumentException"), equalTo(true)); +
assertThat(e.toString() + "should have contained can only be used with a date field type", + e.toString().contains("can only be used with a date field type"), equalTo(true)); + } + } + public void testSparseField() throws Exception { ElasticsearchAssertions.assertAcked(prepareCreate("test").addMapping("doc", "x", "type=long", "y", "type=long")); ensureGreen("test"); indexRandom(true, client().prepareIndex("test", "doc", "1").setSource("x", 4), client().prepareIndex("test", "doc", "2").setSource("y", 2)); - SearchResponse rsp = buildRequest("doc['x'].value + 1").get(); + SearchResponse rsp = buildRequest("doc['x'] + 1").get(); ElasticsearchAssertions.assertSearchResponse(rsp); SearchHits hits = rsp.getHits(); assertEquals(2, rsp.getHits().getTotalHits()); @@ -108,13 +160,13 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { ensureGreen("test"); client().prepareIndex("test", "doc", "1").setSource("x", 4).setRefresh(true).get(); try { - buildRequest("doc['bogus'].value").get(); + buildRequest("doc['bogus']").get(); fail("Expected missing field to cause failure"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained ExpressionScriptCompilationException", - ExceptionsHelper.detailedMessage(e).contains("ExpressionScriptCompilationException"), equalTo(true)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained missing field error", - ExceptionsHelper.detailedMessage(e).contains("does not exist in mappings"), equalTo(true)); + assertThat(e.toString() + "should have contained ExpressionScriptCompilationException", + e.toString().contains("ExpressionScriptCompilationException"), equalTo(true)); + assertThat(e.toString() + "should have contained missing field error", + e.toString().contains("does not exist in mappings"), equalTo(true)); } } @@ -126,7 +178,7 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { client().prepareIndex("test", "doc", "2").setSource("x", 3), client().prepareIndex("test", "doc", "3").setSource("x", 5)); // a = int, b = double, c = long - String script = "doc['x'].value * a + b + ((c + doc['x'].value) > 5000000009 ? 1 : 0)"; + String script = "doc['x'] * a + b + ((c + doc['x']) > 5000000009 ? 
1 : 0)"; SearchResponse rsp = buildRequest(script, "a", 2, "b", 3.5, "c", 5000000000L).get(); SearchHits hits = rsp.getHits(); assertEquals(3, hits.getTotalHits()); @@ -141,10 +193,10 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { buildRequest("garbage%@#%@").get(); fail("Expected expression compilation failure"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained ExpressionScriptCompilationException", - ExceptionsHelper.detailedMessage(e).contains("ExpressionScriptCompilationException"), equalTo(true)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained compilation failure", - ExceptionsHelper.detailedMessage(e).contains("Failed to parse expression"), equalTo(true)); + assertThat(e.toString() + "should have contained ExpressionScriptCompilationException", + e.toString().contains("ExpressionScriptCompilationException"), equalTo(true)); + assertThat(e.toString() + "should have contained compilation failure", + e.toString().contains("Failed to parse expression"), equalTo(true)); } } @@ -154,23 +206,23 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { buildRequest("a", "a", "astring").get(); fail("Expected string parameter to cause failure"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained ExpressionScriptCompilationException", - ExceptionsHelper.detailedMessage(e).contains("ExpressionScriptCompilationException"), equalTo(true)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained non-numeric parameter error", - ExceptionsHelper.detailedMessage(e).contains("must be a numeric type"), equalTo(true)); + assertThat(e.toString() + "should have contained ExpressionScriptCompilationException", + e.toString().contains("ExpressionScriptCompilationException"), equalTo(true)); + assertThat(e.toString() + "should have contained non-numeric parameter error", + e.toString().contains("must be a numeric type"), equalTo(true)); } } public void testNonNumericField() { client().prepareIndex("test", "doc", "1").setSource("text", "this is not a number").setRefresh(true).get(); try { - buildRequest("doc['text'].value").get(); + buildRequest("doc['text']").get(); fail("Expected text field to cause execution failure"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained ExpressionScriptCompilationException", - ExceptionsHelper.detailedMessage(e).contains("ExpressionScriptCompilationException"), equalTo(true)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained non-numeric field error", - ExceptionsHelper.detailedMessage(e).contains("must be numeric"), equalTo(true)); + assertThat(e.toString() + "should have contained ExpressionScriptCompilationException", + e.toString().contains("ExpressionScriptCompilationException"), equalTo(true)); + assertThat(e.toString() + "should have contained non-numeric field error", + e.toString().contains("must be numeric"), equalTo(true)); } } @@ -180,10 +232,10 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { buildRequest("bogus").get(); fail("Expected bogus variable to cause execution failure"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained ExpressionScriptCompilationException", - ExceptionsHelper.detailedMessage(e).contains("ExpressionScriptCompilationException"), 
equalTo(true)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained unknown variable error", - ExceptionsHelper.detailedMessage(e).contains("Unknown variable"), equalTo(true)); + assertThat(e.toString() + "should have contained ExpressionScriptCompilationException", + e.toString().contains("ExpressionScriptCompilationException"), equalTo(true)); + assertThat(e.toString() + "should have contained unknown variable error", + e.toString().contains("Unknown variable"), equalTo(true)); } } @@ -193,10 +245,10 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { buildRequest("doc").get(); fail("Expected doc variable without field to cause execution failure"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained ExpressionScriptCompilationException", - ExceptionsHelper.detailedMessage(e).contains("ExpressionScriptCompilationException"), equalTo(true)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained a missing specific field error", - ExceptionsHelper.detailedMessage(e).contains("must be used with a specific field"), equalTo(true)); + assertThat(e.toString() + "should have contained ExpressionScriptCompilationException", + e.toString().contains("ExpressionScriptCompilationException"), equalTo(true)); + assertThat(e.toString() + "should have contained a missing specific field error", + e.toString().contains("must be used with a specific field"), equalTo(true)); } } @@ -206,10 +258,10 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { buildRequest("doc['foo'].bogus").get(); fail("Expected bogus field member to cause execution failure"); } catch (SearchPhaseExecutionException e) { - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained ExpressionScriptCompilationException", - ExceptionsHelper.detailedMessage(e).contains("ExpressionScriptCompilationException"), equalTo(true)); - assertThat(ExceptionsHelper.detailedMessage(e) + "should have contained field member error", - ExceptionsHelper.detailedMessage(e).contains("Invalid member for field"), equalTo(true)); + assertThat(e.toString() + "should have contained ExpressionScriptCompilationException", + e.toString().contains("ExpressionScriptCompilationException"), equalTo(true)); + assertThat(e.toString() + "should have contained member variable [value] or member methods may be accessed", + e.toString().contains("member variable [value] or member methods may be accessed"), equalTo(true)); } } @@ -260,7 +312,7 @@ public class ExpressionScriptTests extends ElasticsearchIntegrationTest { assertThat(rsp.getShardFailures().length, greaterThan(0)); // at least the shards containing the docs should have failed message = rsp.getShardFailures()[0].reason(); } catch (SearchPhaseExecutionException e) { - message = ExceptionsHelper.detailedMessage(e); + message = e.toString(); } assertThat(message + "should have contained ExpressionScriptExecutionException", message.contains("ExpressionScriptExecutionException"), equalTo(true)); diff --git a/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTest.java b/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTest.java index ab01e465588..3b6f0f479f1 100644 --- a/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTest.java +++ b/src/test/java/org/elasticsearch/script/mustache/MustacheScriptEngineTest.java @@ -18,7 +18,6 @@ */ package org.elasticsearch.script.mustache; -import 
com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.test.ElasticsearchTestCase; @@ -38,10 +37,12 @@ import static org.hamcrest.Matchers.equalTo; */ public class MustacheScriptEngineTest extends ElasticsearchTestCase { private MustacheScriptEngineService qe; + private JsonEscapingMustacheFactory escaper; @Before public void setup() { qe = new MustacheScriptEngineService(ImmutableSettings.Builder.EMPTY_SETTINGS); + escaper = new JsonEscapingMustacheFactory(); } @Test @@ -73,43 +74,98 @@ public class MustacheScriptEngineTest extends ElasticsearchTestCase { public void testEscapeJson() throws IOException { { StringWriter writer = new StringWriter(); - JsonEscapingMustacheFactory.escape("hello \n world", writer); - assertThat(writer.toString(), equalTo("hello \\\n world")); + escaper.encode("hello \n world", writer); + assertThat(writer.toString(), equalTo("hello \\n world")); } { StringWriter writer = new StringWriter(); - JsonEscapingMustacheFactory.escape("\n", writer); - assertThat(writer.toString(), equalTo("\\\n")); + escaper.encode("\n", writer); + assertThat(writer.toString(), equalTo("\\n")); } - Character[] specialChars = new Character[]{'\f', '\n', '\r', '"', '\\', (char) 11, '\t', '\b' }; + Character[] specialChars = new Character[]{ + '\"', + '\\', + '\u0000', + '\u0001', + '\u0002', + '\u0003', + '\u0004', + '\u0005', + '\u0006', + '\u0007', + '\u0008', + '\u0009', + '\u000B', + '\u000C', + '\u000E', + '\u000F', + '\u001F'}; + String[] escapedChars = new String[]{ + "\\\"", + "\\\\", + "\\u0000", + "\\u0001", + "\\u0002", + "\\u0003", + "\\u0004", + "\\u0005", + "\\u0006", + "\\u0007", + "\\u0008", + "\\u0009", + "\\u000B", + "\\u000C", + "\\u000E", + "\\u000F", + "\\u001F"}; int iters = scaledRandomIntBetween(100, 1000); for (int i = 0; i < iters; i++) { int rounds = scaledRandomIntBetween(1, 20); - StringWriter escaped = new StringWriter(); + StringWriter expect = new StringWriter(); StringWriter writer = new StringWriter(); for (int j = 0; j < rounds; j++) { String s = getChars(); writer.write(s); - escaped.write(s); - char c = RandomPicks.randomFrom(getRandom(), specialChars); - writer.append(c); - escaped.append('\\'); - escaped.append(c); + expect.write(s); + + int charIndex = randomInt(7); + writer.append(specialChars[charIndex]); + expect.append(escapedChars[charIndex]); } StringWriter target = new StringWriter(); - assertThat(escaped.toString(), equalTo(JsonEscapingMustacheFactory.escape(writer.toString(), target).toString())); + escaper.encode(writer.toString(), target); + assertThat(expect.toString(), equalTo(target.toString())); } } private String getChars() { String string = randomRealisticUnicodeOfCodepointLengthBetween(0, 10); for (int i = 0; i < string.length(); i++) { - if (JsonEscapingMustacheFactory.isEscapeChar(string.charAt(i))) { + if (isEscapeChar(string.charAt(i))) { return string.substring(0, i); } } return string; } - + + /** + * From https://www.ietf.org/rfc/rfc4627.txt: + * + * All Unicode characters may be placed within the + * quotation marks except for the characters that must be escaped: + * quotation mark, reverse solidus, and the control characters (U+0000 + * through U+001F). 
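+     * getChars() above relies on this predicate to truncate its random input before the first character the escaper would rewrite, so the plain filler text passes through encode() unchanged.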
+ */ + private static boolean isEscapeChar(char c) { + switch (c) { + case '"': + case '\\': + return true; + } + + if (c < '\u0020') + return true; + return false; + } } diff --git a/src/test/java/org/elasticsearch/search/aggregations/AggregationsIntegrationTests.java b/src/test/java/org/elasticsearch/search/aggregations/AggregationsIntegrationTests.java index e7b1d8fbc59..640ac7ffb81 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/AggregationsIntegrationTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/AggregationsIntegrationTests.java @@ -57,7 +57,7 @@ public class AggregationsIntegrationTests extends ElasticsearchIntegrationTest { client().prepareSearch("index").setSearchType(SearchType.SCAN).setScroll(new TimeValue(500)).addAggregation(terms("f").field("f")).get(); fail(); } catch (SearchPhaseExecutionException e) { - assertTrue(e.getMessage(), e.getMessage().contains("aggregations are not supported with search_type=scan")); + assertTrue(e.toString(), e.toString().contains("aggregations are not supported with search_type=scan")); } } diff --git a/src/test/java/org/elasticsearch/search/aggregations/EquivalenceTests.java b/src/test/java/org/elasticsearch/search/aggregations/EquivalenceTests.java index 5079e6730dd..d318ed5ca7b 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/EquivalenceTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/EquivalenceTests.java @@ -42,7 +42,6 @@ import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory import org.elasticsearch.search.aggregations.metrics.sum.Sum; import org.elasticsearch.test.ElasticsearchIntegrationTest; -import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.Iterator; @@ -303,7 +302,7 @@ public class EquivalenceTests extends ElasticsearchIntegrationTest { SearchResponse resp = client().prepareSearch("idx") .addAggregation(terms("terms").field("values").collectMode(randomFrom(SubAggCollectionMode.values())).script("floor(_value / interval)").param("interval", interval).size(maxNumTerms)) - .addAggregation(histogram("histo").field("values").interval(interval)) + .addAggregation(histogram("histo").field("values").interval(interval).minDocCount(1)) .execute().actionGet(); assertSearchResponse(resp); diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java index b34d09bfc1a..d3114d20283 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java @@ -170,7 +170,7 @@ public class DateHistogramTests extends ElasticsearchIntegrationTest { @Test public void singleValuedField_WithTimeZone() throws Exception { SearchResponse response = client().prepareSearch("idx") - .addAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).timeZone("+01:00")).execute() + .addAggregation(dateHistogram("histo").field("date").interval(DateHistogramInterval.DAY).minDocCount(1).timeZone("+01:00")).execute() .actionGet(); DateTimeZone tz = DateTimeZone.forID("+01:00"); assertSearchResponse(response); @@ -1314,7 +1314,7 @@ public class DateHistogramTests extends ElasticsearchIntegrationTest { .actionGet(); fail(); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("IllegalArgumentException")); + assertThat(e.toString(),
containsString("IllegalArgumentException")); } } } diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java index 58a50e8938a..9a6c7c0f9f1 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/HistogramTests.java @@ -1017,7 +1017,7 @@ public class HistogramTests extends ElasticsearchIntegrationTest { .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(-1).minDocCount(0)).execute().actionGet(); fail(); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("Missing required field [interval]")); + assertThat(e.toString(), containsString("Missing required field [interval]")); } } diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java index ac28cd4c8e4..d437eb34915 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/NestedTests.java @@ -365,7 +365,7 @@ public class NestedTests extends ElasticsearchIntegrationTest { .execute().actionGet(); fail(); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("[nested] nested path [incorrect] is not nested")); + assertThat(e.toString(), containsString("[nested] nested path [incorrect] is not nested")); } } diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java index 742fd2cd1fa..db2d86c742a 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsTests.java @@ -65,8 +65,8 @@ public class SignificantTermsTests extends ElasticsearchIntegrationTest { @Override public Settings indexSettings() { return ImmutableSettings.builder() - .put("index.number_of_shards", between(1, 5)) - .put("index.number_of_replicas", between(0, 1)) + .put("index.number_of_shards", numberOfShards()) + .put("index.number_of_replicas", numberOfReplicas()) .build(); } diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java index 98df4c4369e..70bedbdb40f 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/TopHitsTests.java @@ -520,7 +520,7 @@ public class TopHitsTests extends ElasticsearchIntegrationTest { ).get(); fail(); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("No mapping found for [xyz] in order to sort on")); + assertThat(e.toString(), containsString("No mapping found for [xyz] in order to sort on")); } } @@ -553,7 +553,7 @@ public class TopHitsTests extends ElasticsearchIntegrationTest { .get(); fail(); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("Aggregator [top_tags_hits] of type [top_hits] cannot accept sub-aggregations")); + assertThat(e.toString(), containsString("Aggregator [top_tags_hits] of type [top_hits] cannot accept sub-aggregations")); } } @@ -813,7 +813,7 @@ public class TopHitsTests extends 
ElasticsearchIntegrationTest { // Can't explain nested hit with the main query, since both are in a different scopes, also the nested doc may not even have matched with the main query // If top_hits would have a query option then we can explain that query Explanation explanation = searchHit.explanation(); - assertThat(explanation.toString(), containsString("Not a match")); + assertFalse(explanation.isMatch()); // Returns the version of the root document. Nested docs don't have a separate version long version = searchHit.version(); diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java index b84716bf557..855f21de852 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java +++ b/src/test/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregatorTest.java @@ -30,15 +30,14 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.TermQuery; import org.apache.lucene.store.Directory; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.common.compress.CompressedString; +import org.elasticsearch.common.lucene.search.Queries; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.internal.TypeFieldMapper; import org.elasticsearch.index.mapper.internal.UidFieldMapper; -import org.elasticsearch.index.search.nested.NonNestedDocsFilter; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.BucketCollector; @@ -123,7 +122,7 @@ public class NestedAggregatorTest extends ElasticsearchSingleNodeTest { AggregationContext context = new AggregationContext(searchContext); AggregatorFactories.Builder builder = AggregatorFactories.builder(); - builder.add(new NestedAggregator.Factory("test", "nested_field", QueryCachingPolicy.ALWAYS_CACHE)); + builder.addAggregator(new NestedAggregator.Factory("test", "nested_field")); AggregatorFactories factories = builder.build(); searchContext.aggregations(new SearchContextAggregations(factories)); Aggregator[] aggs = factories.createTopLevelAggregators(context); @@ -133,7 +132,7 @@ public class NestedAggregatorTest extends ElasticsearchSingleNodeTest { // We exclude root doc with uid type#2, this will trigger the bug if we don't reset the root doc when we process a new segment, because // root doc type#3 and root doc type#1 have the same segment docid BooleanQuery bq = new BooleanQuery(); - bq.add(NonNestedDocsFilter.INSTANCE, Occur.MUST); + bq.add(Queries.newNonNestedFilter(), Occur.MUST); bq.add(new TermQuery(new Term(UidFieldMapper.NAME, "type#2")), Occur.MUST_NOT); searcher.search(new ConstantScoreQuery(bq), collector); collector.postCollection(); diff --git a/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java b/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java index a5b38728a0f..eeff6304dd3 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java +++ 
b/src/test/java/org/elasticsearch/search/aggregations/bucket/significant/SignificanceHeuristicTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.aggregations.bucket.significant; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.InputStreamStreamInput; @@ -30,7 +29,18 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.InternalAggregations; -import org.elasticsearch.search.aggregations.bucket.significant.heuristics.*; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ChiSquare; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.GND; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.JLHScore; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.MutualInformation; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.PercentageScore; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.ScriptHeuristic; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristic; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicBuilder; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParser; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicParserMapper; +import org.elasticsearch.search.aggregations.bucket.significant.heuristics.SignificanceHeuristicStreams; +import org.elasticsearch.search.aggregations.reducers.Reducer; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ElasticsearchTestCase; import org.elasticsearch.test.TestSearchContext; @@ -40,6 +50,7 @@ import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -102,13 +113,15 @@ public class SignificanceHeuristicTests extends ElasticsearchTestCase { if (randomBoolean()) { BytesRef term = new BytesRef("123.0"); buckets.add(new SignificantLongTerms.Bucket(1, 2, 3, 4, 123, InternalAggregations.EMPTY, null)); - sTerms[0] = new SignificantLongTerms(10, 20, "some_name", null, 1, 1, heuristic, buckets, null); + sTerms[0] = new SignificantLongTerms(10, 20, "some_name", null, 1, 1, heuristic, buckets, + (List) Collections.EMPTY_LIST, null); sTerms[1] = new SignificantLongTerms(); } else { BytesRef term = new BytesRef("someterm"); buckets.add(new SignificantStringTerms.Bucket(term, 1, 2, 3, 4, InternalAggregations.EMPTY)); - sTerms[0] = new SignificantStringTerms(10, 20, "some_name", 1, 1, heuristic, buckets, null); + sTerms[0] = new SignificantStringTerms(10, 20, "some_name", 1, 1, heuristic, buckets, (List) Collections.EMPTY_LIST, + null); sTerms[1] = new SignificantStringTerms(); } return sTerms; @@ -210,35 +223,35 @@ public class SignificanceHeuristicTests extends ElasticsearchTestCase { try { heuristicIsSuperset.getScore(2, 3, 1, 4); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException 
illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("subsetFreq > supersetFreq")); } try { heuristicIsSuperset.getScore(1, 4, 2, 3); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("subsetSize > supersetSize")); } try { heuristicIsSuperset.getScore(2, 1, 3, 4); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("subsetFreq > subsetSize")); } try { heuristicIsSuperset.getScore(1, 2, 4, 3); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("supersetFreq > supersetSize")); } try { heuristicIsSuperset.getScore(1, 3, 4, 4); fail(); - } catch (ElasticsearchIllegalArgumentException assertionError) { + } catch (IllegalArgumentException assertionError) { assertNotNull(assertionError.getMessage()); assertTrue(assertionError.getMessage().contains("supersetFreq - subsetFreq > supersetSize - subsetSize")); } @@ -248,21 +261,21 @@ public class SignificanceHeuristicTests extends ElasticsearchTestCase { values[idx] *= -1; heuristicIsSuperset.getScore(values[0], values[1], values[2], values[3]); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("Frequencies of subset and superset must be positive")); } try { heuristicNotSuperset.getScore(2, 1, 3, 4); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("subsetFreq > subsetSize")); } try { heuristicNotSuperset.getScore(1, 2, 4, 3); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("supersetFreq > supersetSize")); } @@ -272,7 +285,7 @@ public class SignificanceHeuristicTests extends ElasticsearchTestCase { values[idx] *= -1; heuristicNotSuperset.getScore(values[0], values[1], values[2], values[3]); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("Frequencies of subset and superset must be positive")); } @@ -285,21 +298,21 @@ public class SignificanceHeuristicTests extends ElasticsearchTestCase { values[idx] *= -1; heuristic.getScore(values[0], values[1], values[2], values[3]); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { 
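+                // a negative value in any of the four frequency arguments must be rejected before a score is computed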
assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("Frequencies of subset and superset must be positive")); } try { heuristic.getScore(1, 2, 4, 3); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("supersetFreq > supersetSize")); } try { heuristic.getScore(2, 1, 3, 4); fail(); - } catch (ElasticsearchIllegalArgumentException illegalArgumentException) { + } catch (IllegalArgumentException illegalArgumentException) { assertNotNull(illegalArgumentException.getMessage()); assertTrue(illegalArgumentException.getMessage().contains("subsetFreq > subsetSize")); } @@ -339,7 +352,7 @@ public class SignificanceHeuristicTests extends ElasticsearchTestCase { long c = randomLong(); long d = randomLong(); score = heuristic.getScore(a, b, c, d); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } assertThat(score, greaterThanOrEqualTo(0.0)); } @@ -362,7 +375,7 @@ public class SignificanceHeuristicTests extends ElasticsearchTestCase { long c = randomLong(); long d = randomLong(); score = heuristic.getScore(a, b, c, d); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } assertThat(score, lessThanOrEqualTo(1.0)); assertThat(score, greaterThanOrEqualTo(0.0)); diff --git a/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java b/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java index 623143a167b..21c3e702e9d 100644 --- a/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java +++ b/src/test/java/org/elasticsearch/search/aggregations/metrics/CardinalityTests.java @@ -46,8 +46,8 @@ public class CardinalityTests extends ElasticsearchIntegrationTest { @Override public Settings indexSettings() { return ImmutableSettings.builder() - .put("index.number_of_shards", between(1, 5)) - .put("index.number_of_replicas", between(0, 1)) + .put("index.number_of_shards", numberOfShards()) + .put("index.number_of_replicas", numberOfReplicas()) .build(); } diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java new file mode 100644 index 00000000000..b1ac6756f1e --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DateDerivativeTests.java @@ -0,0 +1,337 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.reducers; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.index.mapper.core.DateFieldMapper; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.support.AggregationPath; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.joda.time.DateTime; +import org.joda.time.DateTimeZone; +import org.joda.time.format.DateTimeFormat; +import org.junit.After; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.search.aggregations.AggregationBuilders.dateHistogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.derivative; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.core.IsNull.notNullValue; +import static org.hamcrest.core.IsNull.nullValue; + +@ElasticsearchIntegrationTest.SuiteScopeTest +public class DateDerivativeTests extends ElasticsearchIntegrationTest { + + private DateTime date(int month, int day) { + return new DateTime(2012, month, day, 0, 0, DateTimeZone.UTC); + } + + private DateTime date(String date) { + return DateFieldMapper.Defaults.DATE_TIME_FORMATTER.parser().parseDateTime(date); + } + + private static String format(DateTime date, String pattern) { + return DateTimeFormat.forPattern(pattern).print(date); + } + + private IndexRequestBuilder indexDoc(String idx, DateTime date, int value) throws Exception { + return client().prepareIndex(idx, "type").setSource( + jsonBuilder().startObject().field("date", date).field("value", value).startArray("dates").value(date) + .value(date.plusMonths(1).plusDays(1)).endArray().endObject()); + } + + private IndexRequestBuilder indexDoc(int month, int day, int value) throws Exception { + return client().prepareIndex("idx", "type").setSource( + jsonBuilder().startObject().field("value", value).field("date", date(month, day)).startArray("dates") + .value(date(month, day)).value(date(month + 1, day + 1)).endArray().endObject()); + } + + @Override + public void setupSuiteScopeCluster() throws Exception { + createIndex("idx"); + createIndex("idx_unmapped"); + // TODO: would be nice to have more random data here + prepareCreate("empty_bucket_idx").addMapping("type", "value", "type=integer").execute().actionGet(); + List<IndexRequestBuilder> builders = new ArrayList<>(); + for (int i = 0; i < 2; i++) { + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource( + jsonBuilder().startObject().field("value", i * 2).endObject())); + } + builders.addAll(Arrays.asList(indexDoc(1, 2, 1), // date: Jan 2, dates: Jan 2, Feb 3 + indexDoc(2, 2, 2), // date: Feb 2, dates: Feb 2, Mar 3 + indexDoc(2, 15, 3), // date: Feb 15, dates: Feb 15, Mar 16 + indexDoc(3, 2, 4), //
date: Mar 2, dates: Mar 2, Apr 3 + indexDoc(3, 15, 5), // date: Mar 15, dates: Mar 15, Apr 16 + indexDoc(3, 23, 6))); // date: Mar 23, dates: Mar 23, Apr 24 + indexRandom(true, builders); + ensureSearchable(); + } + + @After + public void afterEachTest() throws IOException { + internalCluster().wipeIndices("idx2"); + } + + @Test + public void singleValuedField() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH).minDocCount(0) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List<? extends Bucket> buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1l)); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, nullValue()); + + key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(2l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(1d)); + + key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3l)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(1d)); + } + + @Test + public void singleValuedField_WithSubAggregation() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH).minDocCount(0) + .subAggregation(derivative("deriv").setBucketsPaths("sum")).subAggregation(sum("sum").field("value"))) + .execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List<? extends Bucket> buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + Object[] propertiesKeys = (Object[]) histo.getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) histo.getProperty("_count"); + Object[] propertiesCounts = (Object[]) histo.getProperty("sum.value"); + + DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.getValue(), equalTo(1.0)); + SimpleValue deriv = bucket.getAggregations().get("deriv"); + assertThat(deriv, nullValue()); + assertThat((DateTime) propertiesKeys[0], equalTo(key)); + assertThat((long)
propertiesDocCounts[0], equalTo(1l)); + assertThat((double) propertiesCounts[0], equalTo(1.0)); + + key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(2l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.getValue(), equalTo(5.0)); + deriv = bucket.getAggregations().get("deriv"); + assertThat(deriv, notNullValue()); + assertThat(deriv.value(), equalTo(4.0)); + assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo(4.0)); + assertThat((DateTime) propertiesKeys[1], equalTo(key)); + assertThat((long) propertiesDocCounts[1], equalTo(2l)); + assertThat((double) propertiesCounts[1], equalTo(5.0)); + + key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + assertThat(sum.getValue(), equalTo(15.0)); + deriv = bucket.getAggregations().get("deriv"); + assertThat(deriv, notNullValue()); + assertThat(deriv.value(), equalTo(10.0)); + assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), equalTo(10.0)); + assertThat((DateTime) propertiesKeys[2], equalTo(key)); + assertThat((long) propertiesDocCounts[2], equalTo(3l)); + assertThat((double) propertiesCounts[2], equalTo(15.0)); + } + + @Test + public void multiValuedField() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + dateHistogram("histo").field("dates").interval(DateHistogramInterval.MONTH).minDocCount(0) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List<? extends Bucket> buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(4)); + + DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(true)); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, nullValue()); + + key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(2.0)); + + key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(5l));
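+        // the March bucket holds five dates: one from each of the three March docs plus the spill-over date from the two February docs, so its derivative over February's three is 5 - 3 = 2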
+        assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(2.0)); + + key = new DateTime(2012, 4, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(3); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(-2.0)); + } + + @Test + public void unmapped() throws Exception { + SearchResponse response = client() + .prepareSearch("idx_unmapped") + .addAggregation( + dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH).minDocCount(0) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + assertThat(deriv.getBuckets().size(), equalTo(0)); + } + + @Test + public void partiallyUnmapped() throws Exception { + SearchResponse response = client() + .prepareSearch("idx", "idx_unmapped") + .addAggregation( + dateHistogram("histo").field("date").interval(DateHistogramInterval.MONTH).minDocCount(0) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List<? extends Bucket> buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(3)); + + DateTime key = new DateTime(2012, 1, 1, 0, 0, DateTimeZone.UTC); + Histogram.Bucket bucket = buckets.get(0); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(1l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(true)); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, nullValue()); + + key = new DateTime(2012, 2, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(1); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(2l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(1.0)); + + key = new DateTime(2012, 3, 1, 0, 0, DateTimeZone.UTC); + bucket = buckets.get(2); + assertThat(bucket, notNullValue()); + assertThat((DateTime) bucket.getKey(), equalTo(key)); + assertThat(bucket.getDocCount(), equalTo(3l)); + assertThat(bucket.getAggregations().asList().isEmpty(), is(false)); + docCountDeriv = bucket.getAggregations().get("deriv"); + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo(1.0)); + } + +} diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java new file mode 100644 index 00000000000..0135f72b4be --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/DerivativeTests.java @@
-0,0 +1,560 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.Bucket; +import org.elasticsearch.search.aggregations.metrics.stats.Stats; +import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.support.AggregationPath; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.hamcrest.Matchers; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; +import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.stats; +import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.derivative; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.closeTo; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.core.IsNull.notNullValue; +import static org.hamcrest.core.IsNull.nullValue; + +@ElasticsearchIntegrationTest.SuiteScopeTest +public class DerivativeTests extends ElasticsearchIntegrationTest { + + private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; + + private static int interval; + private static int numValueBuckets; + private static int numFirstDerivValueBuckets; + private static int numSecondDerivValueBuckets; + private static long[] valueCounts; + private static long[] firstDerivValueCounts; + private static long[] secondDerivValueCounts; + + private static Long[] valueCounts_empty; + private static long numDocsEmptyIdx; + private static Double[] firstDerivValueCounts_empty; + + // expected bucket values for random setup with gaps + private static int numBuckets_empty_rnd; + private static Long[] valueCounts_empty_rnd; + private static Double[] firstDerivValueCounts_empty_rnd; + private static long numDocsEmptyIdx_rnd; + + @Override + public void 
setupSuiteScopeCluster() throws Exception { + createIndex("idx"); + createIndex("idx_unmapped"); + + interval = 5; + numValueBuckets = randomIntBetween(6, 80); + + valueCounts = new long[numValueBuckets]; + for (int i = 0; i < numValueBuckets; i++) { + valueCounts[i] = randomIntBetween(1, 20); + } + + numFirstDerivValueBuckets = numValueBuckets - 1; + firstDerivValueCounts = new long[numFirstDerivValueBuckets]; + Long lastValueCount = null; + for (int i = 0; i < numValueBuckets; i++) { + long thisValue = valueCounts[i]; + if (lastValueCount != null) { + long diff = thisValue - lastValueCount; + firstDerivValueCounts[i - 1] = diff; + } + lastValueCount = thisValue; + } + + numSecondDerivValueBuckets = numFirstDerivValueBuckets - 1; + secondDerivValueCounts = new long[numSecondDerivValueBuckets]; + Long lastFirstDerivativeValueCount = null; + for (int i = 0; i < numFirstDerivValueBuckets; i++) { + long thisFirstDerivativeValue = firstDerivValueCounts[i]; + if (lastFirstDerivativeValueCount != null) { + long diff = thisFirstDerivativeValue - lastFirstDerivativeValueCount; + secondDerivValueCounts[i - 1] = diff; + } + lastFirstDerivativeValueCount = thisFirstDerivativeValue; + } + + List<IndexRequestBuilder> builders = new ArrayList<>(); + for (int i = 0; i < numValueBuckets; i++) { + for (int docs = 0; docs < valueCounts[i]; docs++) { + builders.add(client().prepareIndex("idx", "type").setSource(newDocBuilder(i * interval))); + } + } + + // setup for index with empty buckets + valueCounts_empty = new Long[] { 1l, 1l, 2l, 0l, 2l, 2l, 0l, 0l, 0l, 3l, 2l, 1l }; + firstDerivValueCounts_empty = new Double[] { null, 0d, 1d, -2d, 2d, 0d, -2d, 0d, 0d, 3d, -1d, -1d }; + + assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer")); + for (int i = 0; i < valueCounts_empty.length; i++) { + for (int docs = 0; docs < valueCounts_empty[i]; docs++) { + builders.add(client().prepareIndex("empty_bucket_idx", "type").setSource(newDocBuilder(i))); + numDocsEmptyIdx++; + } + } + + // randomized setup for index with empty buckets + numBuckets_empty_rnd = randomIntBetween(20, 100); + valueCounts_empty_rnd = new Long[numBuckets_empty_rnd]; + firstDerivValueCounts_empty_rnd = new Double[numBuckets_empty_rnd]; + firstDerivValueCounts_empty_rnd[0] = null; + + assertAcked(prepareCreate("empty_bucket_idx_rnd").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer")); + for (int i = 0; i < numBuckets_empty_rnd; i++) { + valueCounts_empty_rnd[i] = (long) randomIntBetween(1, 10); + // make approximately half of the buckets empty + if (randomBoolean()) + valueCounts_empty_rnd[i] = 0l; + for (int docs = 0; docs < valueCounts_empty_rnd[i]; docs++) { + builders.add(client().prepareIndex("empty_bucket_idx_rnd", "type").setSource(newDocBuilder(i))); + numDocsEmptyIdx_rnd++; + } + if (i > 0) { + firstDerivValueCounts_empty_rnd[i] = (double) valueCounts_empty_rnd[i] - valueCounts_empty_rnd[i - 1]; + } + } + + indexRandom(true, builders); + ensureSearchable(); + } + + private XContentBuilder newDocBuilder(int singleValueFieldValue) throws IOException { + return jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, singleValueFieldValue).endObject(); + } + + /** + * test first and second derivative on the histogram's doc counts + */ + @Test + public void docCountDerivative() { + + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .subAggregation(derivative("deriv").setBucketsPaths("_count")) +
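// "2nd_deriv" chains on the first derivative by naming "deriv" as its buckets path, yielding a second derivative of the doc counts +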
.subAggregation(derivative("2nd_deriv").setBucketsPaths("deriv"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, valueCounts[i]); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + if (i > 0) { + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo((double) firstDerivValueCounts[i - 1])); + } else { + assertThat(docCountDeriv, nullValue()); + } + SimpleValue docCount2ndDeriv = bucket.getAggregations().get("2nd_deriv"); + if (i > 1) { + assertThat(docCount2ndDeriv, notNullValue()); + assertThat(docCount2ndDeriv.value(), equalTo((double) secondDerivValueCounts[i - 2])); + } else { + assertThat(docCount2ndDeriv, nullValue()); + } + } + } + + @Test + public void singleValueAggDerivative() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + .subAggregation(derivative("deriv").setBucketsPaths("sum"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + assertThat(deriv.getBuckets().size(), equalTo(numValueBuckets)); + Object[] propertiesKeys = (Object[]) deriv.getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) deriv.getProperty("_count"); + Object[] propertiesSumCounts = (Object[]) deriv.getProperty("sum.value"); + + List buckets = new ArrayList(deriv.getBuckets()); + Long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets + // overwritten + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, valueCounts[i]); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + long expectedSum = valueCounts[i] * (i * interval); + assertThat(sum.getValue(), equalTo((double) expectedSum)); + SimpleValue sumDeriv = bucket.getAggregations().get("deriv"); + if (i > 0) { + assertThat(sumDeriv, notNullValue()); + long sumDerivValue = expectedSum - expectedSumPreviousBucket; + assertThat(sumDeriv.value(), equalTo((double) sumDerivValue)); + assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), + equalTo((double) sumDerivValue)); + } else { + assertThat(sumDeriv, nullValue()); + } + expectedSumPreviousBucket = expectedSum; + assertThat((long) propertiesKeys[i], equalTo((long) i * interval)); + assertThat((long) propertiesDocCounts[i], equalTo(valueCounts[i])); + assertThat((double) propertiesSumCounts[i], equalTo((double) expectedSum)); + } + } + + @Test + public void multiValueAggDerivative() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .subAggregation(stats("stats").field(SINGLE_VALUED_FIELD_NAME)) + 
.subAggregation(derivative("deriv").setBucketsPaths("stats.sum"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + assertThat(deriv.getBuckets().size(), equalTo(numValueBuckets)); + Object[] propertiesKeys = (Object[]) deriv.getProperty("_key"); + Object[] propertiesDocCounts = (Object[]) deriv.getProperty("_count"); + Object[] propertiesSumCounts = (Object[]) deriv.getProperty("stats.sum"); + + List buckets = new ArrayList(deriv.getBuckets()); + Long expectedSumPreviousBucket = Long.MIN_VALUE; // start value, gets + // overwritten + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, valueCounts[i]); + Stats stats = bucket.getAggregations().get("stats"); + assertThat(stats, notNullValue()); + long expectedSum = valueCounts[i] * (i * interval); + assertThat(stats.getSum(), equalTo((double) expectedSum)); + SimpleValue sumDeriv = bucket.getAggregations().get("deriv"); + if (i > 0) { + assertThat(sumDeriv, notNullValue()); + long sumDerivValue = expectedSum - expectedSumPreviousBucket; + assertThat(sumDeriv.value(), equalTo((double) sumDerivValue)); + assertThat((double) bucket.getProperty("histo", AggregationPath.parse("deriv.value").getPathElementsAsStringList()), + equalTo((double) sumDerivValue)); + } else { + assertThat(sumDeriv, nullValue()); + } + expectedSumPreviousBucket = expectedSum; + assertThat((long) propertiesKeys[i], equalTo((long) i * interval)); + assertThat((long) propertiesDocCounts[i], equalTo(valueCounts[i])); + assertThat((double) propertiesSumCounts[i], equalTo((double) expectedSum)); + } + } + + @Test + public void unmapped() throws Exception { + SearchResponse response = client() + .prepareSearch("idx_unmapped") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + assertThat(deriv.getBuckets().size(), equalTo(0)); + } + + @Test + public void partiallyUnmapped() throws Exception { + SearchResponse response = client() + .prepareSearch("idx", "idx_unmapped") + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram deriv = response.getAggregations().get("histo"); + assertThat(deriv, notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(deriv.getBuckets().size(), equalTo(numValueBuckets)); + + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i * interval, valueCounts[i]); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + if (i > 0) { + assertThat(docCountDeriv, notNullValue()); + assertThat(docCountDeriv.value(), equalTo((double) firstDerivValueCounts[i - 1])); + } else { + assertThat(docCountDeriv, nullValue()); + } + } + } + + @Test + public void docCountDerivativeWithGaps() throws Exception { + SearchResponse searchResponse = client() + 
.prepareSearch("empty_bucket_idx") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) + .subAggregation(derivative("deriv").setBucketsPaths("_count"))).execute().actionGet(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx)); + + InternalHistogram deriv = searchResponse.getAggregations().get("histo"); + assertThat(deriv, Matchers.notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(valueCounts_empty.length)); + + for (int i = 0; i < valueCounts_empty.length; i++) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i, valueCounts_empty[i]); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + if (firstDerivValueCounts_empty[i] == null) { + assertThat(docCountDeriv, nullValue()); + } else { + assertThat(docCountDeriv.value(), equalTo(firstDerivValueCounts_empty[i])); + } + } + } + + @Test + public void docCountDerivativeWithGaps_random() throws Exception { + SearchResponse searchResponse = client() + .prepareSearch("empty_bucket_idx_rnd") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) + .extendedBounds(0l, (long) numBuckets_empty_rnd - 1) + .subAggregation(derivative("deriv").setBucketsPaths("_count").gapPolicy(randomFrom(GapPolicy.values())))) + .execute().actionGet(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx_rnd)); + + InternalHistogram deriv = searchResponse.getAggregations().get("histo"); + assertThat(deriv, Matchers.notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(numBuckets_empty_rnd)); + + for (int i = 0; i < valueCounts_empty_rnd.length; i++) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i, valueCounts_empty_rnd[i]); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + if (firstDerivValueCounts_empty_rnd[i] == null) { + assertThat(docCountDeriv, nullValue()); + } else { + assertThat(docCountDeriv.value(), equalTo(firstDerivValueCounts_empty_rnd[i])); + } + } + } + + @Test + public void docCountDerivativeWithGaps_insertZeros() throws Exception { + SearchResponse searchResponse = client() + .prepareSearch("empty_bucket_idx") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) + .subAggregation(derivative("deriv").setBucketsPaths("_count").gapPolicy(GapPolicy.INSERT_ZEROS))).execute() + .actionGet(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx)); + + InternalHistogram deriv = searchResponse.getAggregations().get("histo"); + assertThat(deriv, Matchers.notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(valueCounts_empty.length)); + + for (int i = 0; i < valueCounts_empty.length; i++) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i + ": ", bucket, i, valueCounts_empty[i]); + SimpleValue docCountDeriv = bucket.getAggregations().get("deriv"); + if (firstDerivValueCounts_empty[i] == null) { + assertThat(docCountDeriv, nullValue()); + } else { + assertThat(docCountDeriv.value(), equalTo(firstDerivValueCounts_empty[i])); + } + } + } + + @Test + public void 
singleValueAggDerivativeWithGaps() throws Exception { + SearchResponse searchResponse = client() + .prepareSearch("empty_bucket_idx") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + .subAggregation(derivative("deriv").setBucketsPaths("sum"))).execute().actionGet(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx)); + + InternalHistogram deriv = searchResponse.getAggregations().get("histo"); + assertThat(deriv, Matchers.notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(valueCounts_empty.length)); + + double lastSumValue = Double.NaN; + for (int i = 0; i < valueCounts_empty.length; i++) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i, valueCounts_empty[i]); + Sum sum = bucket.getAggregations().get("sum"); + double thisSumValue = sum.value(); + if (bucket.getDocCount() == 0) { + thisSumValue = Double.NaN; + } + SimpleValue sumDeriv = bucket.getAggregations().get("deriv"); + if (i == 0) { + assertThat(sumDeriv, nullValue()); + } else { + double expectedDerivative = thisSumValue - lastSumValue; + if (Double.isNaN(expectedDerivative)) { + assertThat(sumDeriv.value(), equalTo(expectedDerivative)); + } else { + assertThat(sumDeriv.value(), closeTo(expectedDerivative, 0.00001)); + } + } + lastSumValue = thisSumValue; + } + } + + @Test + public void singleValueAggDerivativeWithGaps_insertZeros() throws Exception { + SearchResponse searchResponse = client() + .prepareSearch("empty_bucket_idx") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + .subAggregation(derivative("deriv").setBucketsPaths("sum").gapPolicy(GapPolicy.INSERT_ZEROS))).execute() + .actionGet(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx)); + + InternalHistogram deriv = searchResponse.getAggregations().get("histo"); + assertThat(deriv, Matchers.notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(valueCounts_empty.length)); + + double lastSumValue = Double.NaN; + for (int i = 0; i < valueCounts_empty.length; i++) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i, valueCounts_empty[i]); + Sum sum = bucket.getAggregations().get("sum"); + double thisSumValue = sum.value(); + if (bucket.getDocCount() == 0) { + thisSumValue = 0; + } + SimpleValue sumDeriv = bucket.getAggregations().get("deriv"); + if (i == 0) { + assertThat(sumDeriv, nullValue()); + } else { + double expectedDerivative = thisSumValue - lastSumValue; + assertThat(sumDeriv.value(), closeTo(expectedDerivative, 0.00001)); + } + lastSumValue = thisSumValue; + } + } + + @Test + public void singleValueAggDerivativeWithGaps_random() throws Exception { + GapPolicy gapPolicy = randomFrom(GapPolicy.values()); + SearchResponse searchResponse = client() + .prepareSearch("empty_bucket_idx_rnd") + .setQuery(matchAllQuery()) + .addAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(1) + .extendedBounds(0l, (long) numBuckets_empty_rnd - 1) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME)) + 
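// the gap policy was drawn at random above: INSERT_ZEROS treats an empty bucket's sum as 0, otherwise the gap propagates as NaN +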
.subAggregation(derivative("deriv").setBucketsPaths("sum").gapPolicy(gapPolicy))).execute().actionGet(); + + assertThat(searchResponse.getHits().getTotalHits(), equalTo(numDocsEmptyIdx_rnd)); + + InternalHistogram deriv = searchResponse.getAggregations().get("histo"); + assertThat(deriv, Matchers.notNullValue()); + assertThat(deriv.getName(), equalTo("histo")); + List buckets = deriv.getBuckets(); + assertThat(buckets.size(), equalTo(numBuckets_empty_rnd)); + + double lastSumValue = Double.NaN; + for (int i = 0; i < valueCounts_empty_rnd.length; i++) { + Histogram.Bucket bucket = buckets.get(i); + checkBucketKeyAndDocCount("Bucket " + i, bucket, i, valueCounts_empty_rnd[i]); + Sum sum = bucket.getAggregations().get("sum"); + double thisSumValue = sum.value(); + if (bucket.getDocCount() == 0) { + thisSumValue = gapPolicy == GapPolicy.INSERT_ZEROS ? 0 : Double.NaN; + } + SimpleValue sumDeriv = bucket.getAggregations().get("deriv"); + if (i == 0) { + assertThat(sumDeriv, nullValue()); + } else { + double expectedDerivative = thisSumValue - lastSumValue; + if (Double.isNaN(expectedDerivative)) { + assertThat(sumDeriv.value(), equalTo(expectedDerivative)); + } else { + assertThat(sumDeriv.value(), closeTo(expectedDerivative, 0.00001)); + } + } + lastSumValue = thisSumValue; + } + } + + private void checkBucketKeyAndDocCount(final String msg, final Histogram.Bucket bucket, final long expectedKey, + final long expectedDocCount) { + assertThat(msg, bucket, notNullValue()); + assertThat(msg + " key", ((Number) bucket.getKey()).longValue(), equalTo(expectedKey)); + assertThat(msg + " docCount", bucket.getDocCount(), equalTo(expectedDocCount)); + } +} diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java new file mode 100644 index 00000000000..4b91c92fccf --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/MaxBucketTests.java @@ -0,0 +1,433 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.reducers; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; +import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.maxBucket; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.core.IsNull.notNullValue; + +@ElasticsearchIntegrationTest.SuiteScopeTest +public class MaxBucketTests extends ElasticsearchIntegrationTest { + + private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; + + static int numDocs; + static int interval; + static int minRandomValue; + static int maxRandomValue; + static int numValueBuckets; + static long[] valueCounts; + + @Override + public void setupSuiteScopeCluster() throws Exception { + createIndex("idx"); + createIndex("idx_unmapped"); + + numDocs = randomIntBetween(6, 20); + interval = randomIntBetween(2, 5); + + minRandomValue = 0; + maxRandomValue = 20; + + numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; + valueCounts = new long[numValueBuckets]; + + List builders = new ArrayList<>(); + + for (int i = 0; i < numDocs; i++) { + int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); + builders.add(client().prepareIndex("idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval)) + .endObject())); + final int bucket = (fieldValue / interval); // + (fieldValue < 0 ? 
-1 : 0) - (minRandomValue / interval - 1); + valueCounts[bucket]++; + } + + assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer")); + for (int i = 0; i < 2; i++) { + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject())); + } + indexRandom(true, builders); + ensureSearchable(); + } + + @Test + public void testDocCount_topLevel() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .addAggregation(maxBucket("max_bucket").setBucketsPaths("histo>_count")).execute().actionGet(); + + assertSearchResponse(response); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + List maxKeys = new ArrayList<>(); + double maxValue = Double.NEGATIVE_INFINITY; + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + if (bucket.getDocCount() > maxValue) { + maxValue = bucket.getDocCount(); + maxKeys = new ArrayList<>(); + maxKeys.add(bucket.getKeyAsString()); + } else if (bucket.getDocCount() == maxValue) { + maxKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue maxBucketValue = response.getAggregations().get("max_bucket"); + assertThat(maxBucketValue, notNullValue()); + assertThat(maxBucketValue.getName(), equalTo("max_bucket")); + assertThat(maxBucketValue.value(), equalTo(maxValue)); + assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); + } + + @Test + public void testDocCount_asSubAgg() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .subAggregation(maxBucket("max_bucket").setBucketsPaths("histo>_count"))).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + List maxKeys = new ArrayList<>(); + double maxValue = Double.NEGATIVE_INFINITY; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + if (bucket.getDocCount() > maxValue) { + maxValue = 
bucket.getDocCount(); + maxKeys = new ArrayList<>(); + maxKeys.add(bucket.getKeyAsString()); + } else if (bucket.getDocCount() == maxValue) { + maxKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue maxBucketValue = termsBucket.getAggregations().get("max_bucket"); + assertThat(maxBucketValue, notNullValue()); + assertThat(maxBucketValue.getName(), equalTo("max_bucket")); + assertThat(maxBucketValue.value(), equalTo(maxValue)); + assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); + } + } + + @Test + public void testMetric_topLevel() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(maxBucket("max_bucket").setBucketsPaths("terms>sum")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(interval)); + + List maxKeys = new ArrayList<>(); + double maxValue = Double.NEGATIVE_INFINITY; + for (int i = 0; i < interval; ++i) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval))); + assertThat(bucket.getDocCount(), greaterThan(0l)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + if (sum.value() > maxValue) { + maxValue = sum.value(); + maxKeys = new ArrayList<>(); + maxKeys.add(bucket.getKeyAsString()); + } else if (sum.value() == maxValue) { + maxKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue maxBucketValue = response.getAggregations().get("max_bucket"); + assertThat(maxBucketValue, notNullValue()); + assertThat(maxBucketValue.getName(), equalTo("max_bucket")); + assertThat(maxBucketValue.value(), equalTo(maxValue)); + assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); + } + + @Test + public void testMetric_asSubAgg() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .subAggregation(maxBucket("max_bucket").setBucketsPaths("histo>sum"))).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + List maxKeys = new ArrayList<>(); + double maxValue = Double.NEGATIVE_INFINITY; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + 
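// extendedBounds can create empty buckets; the doc-count guard below keeps their missing sums out of the expected max +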
assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + if (bucket.getDocCount() != 0) { + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + if (sum.value() > maxValue) { + maxValue = sum.value(); + maxKeys = new ArrayList<>(); + maxKeys.add(bucket.getKeyAsString()); + } else if (sum.value() == maxValue) { + maxKeys.add(bucket.getKeyAsString()); + } + } + } + + InternalBucketMetricValue maxBucketValue = termsBucket.getAggregations().get("max_bucket"); + assertThat(maxBucketValue, notNullValue()); + assertThat(maxBucketValue.getName(), equalTo("max_bucket")); + assertThat(maxBucketValue.value(), equalTo(maxValue)); + assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); + } + } + + @Test + public void testMetric_asSubAggWithInsertZeros() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .subAggregation(maxBucket("max_bucket").setBucketsPaths("histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS))) + .execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + List maxKeys = new ArrayList<>(); + double maxValue = Double.NEGATIVE_INFINITY; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + if (sum.value() > maxValue) { + maxValue = sum.value(); + maxKeys = new ArrayList<>(); + maxKeys.add(bucket.getKeyAsString()); + } else if (sum.value() == maxValue) { + maxKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue maxBucketValue = termsBucket.getAggregations().get("max_bucket"); + assertThat(maxBucketValue, notNullValue()); + assertThat(maxBucketValue.getName(), equalTo("max_bucket")); + assertThat(maxBucketValue.value(), equalTo(maxValue)); + assertThat(maxBucketValue.keys(), equalTo(maxKeys.toArray(new String[maxKeys.size()]))); + } + } + + @Test + public void testNoBuckets() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(terms("terms").field("tag").exclude("tag.*").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(maxBucket("max_bucket").setBucketsPaths("terms>sum")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List buckets = terms.getBuckets(); + 
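// exclude("tag.*") filters out every generated term, so max_bucket receives no buckets at all +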
assertThat(buckets.size(), equalTo(0)); + + InternalBucketMetricValue maxBucketValue = response.getAggregations().get("max_bucket"); + assertThat(maxBucketValue, notNullValue()); + assertThat(maxBucketValue.getName(), equalTo("max_bucket")); + assertThat(maxBucketValue.value(), equalTo(Double.NEGATIVE_INFINITY)); + assertThat(maxBucketValue.keys(), equalTo(new String[0])); + } + + @Test + public void testNested() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .subAggregation(maxBucket("max_histo_bucket").setBucketsPaths("histo>_count"))) + .addAggregation(maxBucket("max_terms_bucket").setBucketsPaths("terms>max_histo_bucket")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + List maxTermsKeys = new ArrayList<>(); + double maxTermsValue = Double.NEGATIVE_INFINITY; + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + List maxHistoKeys = new ArrayList<>(); + double maxHistoValue = Double.NEGATIVE_INFINITY; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + if (bucket.getDocCount() > maxHistoValue) { + maxHistoValue = bucket.getDocCount(); + maxHistoKeys = new ArrayList<>(); + maxHistoKeys.add(bucket.getKeyAsString()); + } else if (bucket.getDocCount() == maxHistoValue) { + maxHistoKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue maxBucketValue = termsBucket.getAggregations().get("max_histo_bucket"); + assertThat(maxBucketValue, notNullValue()); + assertThat(maxBucketValue.getName(), equalTo("max_histo_bucket")); + assertThat(maxBucketValue.value(), equalTo(maxHistoValue)); + assertThat(maxBucketValue.keys(), equalTo(maxHistoKeys.toArray(new String[maxHistoKeys.size()]))); + if (maxHistoValue > maxTermsValue) { + maxTermsValue = maxHistoValue; + maxTermsKeys = new ArrayList<>(); + maxTermsKeys.add(termsBucket.getKeyAsString()); + } else if (maxHistoValue == maxTermsValue) { + maxTermsKeys.add(termsBucket.getKeyAsString()); + } + } + + InternalBucketMetricValue maxBucketValue = response.getAggregations().get("max_terms_bucket"); + assertThat(maxBucketValue, notNullValue()); + assertThat(maxBucketValue.getName(), equalTo("max_terms_bucket")); + assertThat(maxBucketValue.value(), equalTo(maxTermsValue)); + assertThat(maxBucketValue.keys(), equalTo(maxTermsKeys.toArray(new String[maxTermsKeys.size()]))); + } +} diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/MinBucketTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/MinBucketTests.java new file mode 100644 index 00000000000..c34ba920da9 --- /dev/null +++ 
b/src/test/java/org/elasticsearch/search/aggregations/reducers/MinBucketTests.java @@ -0,0 +1,433 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers; + +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram; +import org.elasticsearch.search.aggregations.bucket.histogram.Histogram.Bucket; +import org.elasticsearch.search.aggregations.bucket.terms.Terms; +import org.elasticsearch.search.aggregations.bucket.terms.Terms.Order; +import org.elasticsearch.search.aggregations.metrics.sum.Sum; +import org.elasticsearch.search.aggregations.reducers.BucketHelpers.GapPolicy; +import org.elasticsearch.search.aggregations.reducers.bucketmetrics.InternalBucketMetricValue; +import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram; +import static org.elasticsearch.search.aggregations.AggregationBuilders.sum; +import static org.elasticsearch.search.aggregations.AggregationBuilders.terms; +import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.minBucket; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.core.IsNull.notNullValue; + +@ElasticsearchIntegrationTest.SuiteScopeTest +public class MinBucketTests extends ElasticsearchIntegrationTest { + + private static final String SINGLE_VALUED_FIELD_NAME = "l_value"; + + static int numDocs; + static int interval; + static int minRandomValue; + static int maxRandomValue; + static int numValueBuckets; + static long[] valueCounts; + + @Override + public void setupSuiteScopeCluster() throws Exception { + createIndex("idx"); + createIndex("idx_unmapped"); + + numDocs = randomIntBetween(6, 20); + interval = randomIntBetween(2, 5); + + minRandomValue = 0; + maxRandomValue = 20; + + numValueBuckets = ((maxRandomValue - minRandomValue) / interval) + 1; + valueCounts = new long[numValueBuckets]; + + List builders = new ArrayList<>(); + + for (int i = 0; i < numDocs; i++) { + int fieldValue = randomIntBetween(minRandomValue, maxRandomValue); + builders.add(client().prepareIndex("idx", "type").setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, fieldValue).field("tag", "tag" + (i % interval)) + .endObject())); + final 
int bucket = (fieldValue / interval); // + (fieldValue < 0 ? -1 : 0) - (minRandomValue / interval - 1); + valueCounts[bucket]++; + } + + assertAcked(prepareCreate("empty_bucket_idx").addMapping("type", SINGLE_VALUED_FIELD_NAME, "type=integer")); + for (int i = 0; i < 2; i++) { + builders.add(client().prepareIndex("empty_bucket_idx", "type", "" + i).setSource( + jsonBuilder().startObject().field(SINGLE_VALUED_FIELD_NAME, i * 2).endObject())); + } + indexRandom(true, builders); + ensureSearchable(); + } + + @Test + public void testDocCount_topLevel() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .addAggregation(minBucket("min_bucket").setBucketsPaths("histo>_count")).execute().actionGet(); + + assertSearchResponse(response); + + Histogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(numValueBuckets)); + + List minKeys = new ArrayList<>(); + double minValue = Double.POSITIVE_INFINITY; + for (int i = 0; i < numValueBuckets; ++i) { + Histogram.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) i * interval)); + assertThat(bucket.getDocCount(), equalTo(valueCounts[i])); + if (bucket.getDocCount() < minValue) { + minValue = bucket.getDocCount(); + minKeys = new ArrayList<>(); + minKeys.add(bucket.getKeyAsString()); + } else if (bucket.getDocCount() == minValue) { + minKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_bucket"); + assertThat(minBucketValue, notNullValue()); + assertThat(minBucketValue.getName(), equalTo("min_bucket")); + assertThat(minBucketValue.value(), equalTo(minValue)); + assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); + } + + @Test + public void testDocCount_asSubAgg() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .subAggregation(minBucket("min_bucket").setBucketsPaths("histo>_count"))).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + List minKeys = new ArrayList<>(); + double minValue = Double.POSITIVE_INFINITY; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + 
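// ties are accumulated below: every bucket whose doc count equals the minimum is expected among min_bucket's keys +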
if (bucket.getDocCount() < minValue) { + minValue = bucket.getDocCount(); + minKeys = new ArrayList<>(); + minKeys.add(bucket.getKeyAsString()); + } else if (bucket.getDocCount() == minValue) { + minKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue minBucketValue = termsBucket.getAggregations().get("min_bucket"); + assertThat(minBucketValue, notNullValue()); + assertThat(minBucketValue.getName(), equalTo("min_bucket")); + assertThat(minBucketValue.value(), equalTo(minValue)); + assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); + } + } + + @Test + public void testMetric_topLevel() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation(terms("terms").field("tag").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(minBucket("min_bucket").setBucketsPaths("terms>sum")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(interval)); + + List minKeys = new ArrayList<>(); + double minValue = Double.POSITIVE_INFINITY; + for (int i = 0; i < interval; ++i) { + Terms.Bucket bucket = buckets.get(i); + assertThat(bucket, notNullValue()); + assertThat((String) bucket.getKey(), equalTo("tag" + (i % interval))); + assertThat(bucket.getDocCount(), greaterThan(0l)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + if (sum.value() < minValue) { + minValue = sum.value(); + minKeys = new ArrayList<>(); + minKeys.add(bucket.getKeyAsString()); + } else if (sum.value() == minValue) { + minKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_bucket"); + assertThat(minBucketValue, notNullValue()); + assertThat(minBucketValue.getName(), equalTo("min_bucket")); + assertThat(minBucketValue.value(), equalTo(minValue)); + assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); + } + + @Test + public void testMetric_asSubAgg() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .subAggregation(minBucket("min_bucket").setBucketsPaths("histo>sum"))).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + List minKeys = new ArrayList<>(); + double minValue = Double.POSITIVE_INFINITY; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + 
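// under the default gap policy an empty bucket carries no sum, hence the doc-count guard a few lines below +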
assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + if (bucket.getDocCount() != 0) { + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + if (sum.value() < minValue) { + minValue = sum.value(); + minKeys = new ArrayList<>(); + minKeys.add(bucket.getKeyAsString()); + } else if (sum.value() == minValue) { + minKeys.add(bucket.getKeyAsString()); + } + } + } + + InternalBucketMetricValue minBucketValue = termsBucket.getAggregations().get("min_bucket"); + assertThat(minBucketValue, notNullValue()); + assertThat(minBucketValue.getName(), equalTo("min_bucket")); + assertThat(minBucketValue.value(), equalTo(minValue)); + assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); + } + } + + @Test + public void testMetric_asSubAggWithInsertZeros() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue) + .subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .subAggregation(minBucket("min_bucket").setBucketsPaths("histo>sum").gapPolicy(GapPolicy.INSERT_ZEROS))) + .execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + List minKeys = new ArrayList<>(); + double minValue = Double.POSITIVE_INFINITY; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + Sum sum = bucket.getAggregations().get("sum"); + assertThat(sum, notNullValue()); + if (sum.value() < minValue) { + minValue = sum.value(); + minKeys = new ArrayList<>(); + minKeys.add(bucket.getKeyAsString()); + } else if (sum.value() == minValue) { + minKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue minBucketValue = termsBucket.getAggregations().get("min_bucket"); + assertThat(minBucketValue, notNullValue()); + assertThat(minBucketValue.getName(), equalTo("min_bucket")); + assertThat(minBucketValue.value(), equalTo(minValue)); + assertThat(minBucketValue.keys(), equalTo(minKeys.toArray(new String[minKeys.size()]))); + } + } + + @Test + public void testNoBuckets() throws Exception { + SearchResponse response = client().prepareSearch("idx") + .addAggregation(terms("terms").field("tag").exclude("tag.*").subAggregation(sum("sum").field(SINGLE_VALUED_FIELD_NAME))) + .addAggregation(minBucket("min_bucket").setBucketsPaths("terms>sum")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List 
buckets = terms.getBuckets(); + assertThat(buckets.size(), equalTo(0)); + + InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_bucket"); + assertThat(minBucketValue, notNullValue()); + assertThat(minBucketValue.getName(), equalTo("min_bucket")); + assertThat(minBucketValue.value(), equalTo(Double.POSITIVE_INFINITY)); + assertThat(minBucketValue.keys(), equalTo(new String[0])); + } + + @Test + public void testNested() throws Exception { + SearchResponse response = client() + .prepareSearch("idx") + .addAggregation( + terms("terms") + .field("tag") + .order(Order.term(true)) + .subAggregation( + histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval) + .extendedBounds((long) minRandomValue, (long) maxRandomValue)) + .subAggregation(minBucket("min_histo_bucket").setBucketsPaths("histo>_count"))) + .addAggregation(minBucket("min_terms_bucket").setBucketsPaths("terms>min_histo_bucket")).execute().actionGet(); + + assertSearchResponse(response); + + Terms terms = response.getAggregations().get("terms"); + assertThat(terms, notNullValue()); + assertThat(terms.getName(), equalTo("terms")); + List termsBuckets = terms.getBuckets(); + assertThat(termsBuckets.size(), equalTo(interval)); + + List minTermsKeys = new ArrayList<>(); + double minTermsValue = Double.POSITIVE_INFINITY; + for (int i = 0; i < interval; ++i) { + Terms.Bucket termsBucket = termsBuckets.get(i); + assertThat(termsBucket, notNullValue()); + assertThat((String) termsBucket.getKey(), equalTo("tag" + (i % interval))); + + Histogram histo = termsBucket.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + + List minHistoKeys = new ArrayList<>(); + double minHistoValue = Double.POSITIVE_INFINITY; + for (int j = 0; j < numValueBuckets; ++j) { + Histogram.Bucket bucket = buckets.get(j); + assertThat(bucket, notNullValue()); + assertThat(((Number) bucket.getKey()).longValue(), equalTo((long) j * interval)); + if (bucket.getDocCount() < minHistoValue) { + minHistoValue = bucket.getDocCount(); + minHistoKeys = new ArrayList<>(); + minHistoKeys.add(bucket.getKeyAsString()); + } else if (bucket.getDocCount() == minHistoValue) { + minHistoKeys.add(bucket.getKeyAsString()); + } + } + + InternalBucketMetricValue minBucketValue = termsBucket.getAggregations().get("min_histo_bucket"); + assertThat(minBucketValue, notNullValue()); + assertThat(minBucketValue.getName(), equalTo("min_histo_bucket")); + assertThat(minBucketValue.value(), equalTo(minHistoValue)); + assertThat(minBucketValue.keys(), equalTo(minHistoKeys.toArray(new String[minHistoKeys.size()]))); + if (minHistoValue < minTermsValue) { + minTermsValue = minHistoValue; + minTermsKeys = new ArrayList<>(); + minTermsKeys.add(termsBucket.getKeyAsString()); + } else if (minHistoValue == minTermsValue) { + minTermsKeys.add(termsBucket.getKeyAsString()); + } + } + + InternalBucketMetricValue minBucketValue = response.getAggregations().get("min_terms_bucket"); + assertThat(minBucketValue, notNullValue()); + assertThat(minBucketValue.getName(), equalTo("min_terms_bucket")); + assertThat(minBucketValue.value(), equalTo(minTermsValue)); + assertThat(minBucketValue.keys(), equalTo(minTermsKeys.toArray(new String[minTermsKeys.size()]))); + } +} diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/ReducerHelperTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/ReducerHelperTests.java new file mode 100644 index 
00000000000..0b0f720344f --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/ReducerHelperTests.java @@ -0,0 +1,131 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.search.aggregations.reducers; + + +import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.avg.AvgBuilder; +import org.elasticsearch.search.aggregations.metrics.max.MaxBuilder; +import org.elasticsearch.search.aggregations.metrics.min.MinBuilder; +import org.elasticsearch.search.aggregations.metrics.sum.SumBuilder; +import org.elasticsearch.test.ElasticsearchTestCase; + +import java.util.ArrayList; + +/** + * Provides helper methods and classes for use in Reducer tests, such as creating mock histograms or computing + * simple metrics + */ +public class ReducerHelperTests extends ElasticsearchTestCase { + + /** + * Generates a mock histogram to use for testing. Each MockBucket holds a doc count, key and document values + * which can later be used to compute metrics and compare against the real aggregation results. Gappiness can be + * controlled via parameters + * + * @param interval Interval between bucket keys + * @param size Size of mock histogram to generate (in buckets) + * @param gapProbability Probability of generating an empty bucket. 0.0-1.0 inclusive + * @param runProbability Probability of extending a gap once one has been created. 
0.0-1.0 inclusive
+     * @return Generated list of mock histogram buckets
+     */
+    public static ArrayList<MockBucket> generateHistogram(int interval, int size, double gapProbability, double runProbability) {
+        ArrayList<MockBucket> values = new ArrayList<>(size);
+
+        boolean lastWasGap = false;
+
+        for (int i = 0; i < size; i++) {
+            MockBucket bucket = new MockBucket();
+            if (randomDouble() < gapProbability) {
+                // start a gap
+                bucket.count = 0;
+                bucket.docValues = new double[0];
+
+                lastWasGap = true;
+
+            } else if (lastWasGap && randomDouble() < runProbability) {
+                // add to the existing gap
+                bucket.count = 0;
+                bucket.docValues = new double[0];
+
+                lastWasGap = true;
+            } else {
+                bucket.count = randomIntBetween(1, 50);
+                bucket.docValues = new double[bucket.count];
+                for (int j = 0; j < bucket.count; j++) {
+                    bucket.docValues[j] = randomDouble() * randomIntBetween(-20, 20);
+                }
+                lastWasGap = false;
+            }
+
+            bucket.key = i * interval;
+            values.add(bucket);
+        }
+
+        return values;
+    }
+
+    /**
+     * Simple mock bucket container
+     */
+    public static class MockBucket {
+        public int count;
+        public double[] docValues;
+        public long key;
+    }
+
+    /**
+     * Computes a simple agg metric (min, max, sum or avg) from the provided values
+     *
+     * @param values Array of values to compute the metric for
+     * @param metric A metric builder which defines what kind of metric should be returned for the values
+     * @return The computed metric value
+     */
+    public static double calculateMetric(double[] values, ValuesSourceMetricsAggregationBuilder metric) {
+
+        if (metric instanceof MinBuilder) {
+            double accumulator = Double.MAX_VALUE;
+            for (double value : values) {
+                accumulator = Math.min(accumulator, value);
+            }
+            return accumulator;
+        } else if (metric instanceof MaxBuilder) {
+            // Start from Double.NEGATIVE_INFINITY rather than Double.MIN_VALUE: the latter is the
+            // smallest *positive* double, which would break the max for all-negative doc values
+            double accumulator = Double.NEGATIVE_INFINITY;
+            for (double value : values) {
+                accumulator = Math.max(accumulator, value);
+            }
+            return accumulator;
+        } else if (metric instanceof SumBuilder) {
+            double accumulator = 0;
+            for (double value : values) {
+                accumulator += value;
+            }
+            return accumulator;
+        } else if (metric instanceof AvgBuilder) {
+            double accumulator = 0;
+            for (double value : values) {
+                accumulator += value;
+            }
+            return accumulator / values.length;
+        }
+
+        return 0.0;
+    }
+}
diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java
new file mode 100644
index 00000000000..77b7c8bc208
--- /dev/null
+++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgTests.java
@@ -0,0 +1,1086 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.search.aggregations.reducers.moving.avg;
+
+
+import com.google.common.collect.EvictingQueue;
+
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchPhaseExecutionException;
+import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.index.query.RangeFilterBuilder;
+import org.elasticsearch.search.aggregations.bucket.filter.InternalFilter;
+import org.elasticsearch.search.aggregations.bucket.histogram.Histogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram;
+import org.elasticsearch.search.aggregations.bucket.histogram.InternalHistogram.Bucket;
+import org.elasticsearch.search.aggregations.metrics.ValuesSourceMetricsAggregationBuilder;
+import org.elasticsearch.search.aggregations.reducers.BucketHelpers;
+import org.elasticsearch.search.aggregations.reducers.ReducerHelperTests;
+import org.elasticsearch.search.aggregations.reducers.SimpleValue;
+import org.elasticsearch.search.aggregations.reducers.movavg.models.DoubleExpModel;
+import org.elasticsearch.search.aggregations.reducers.movavg.models.LinearModel;
+import org.elasticsearch.search.aggregations.reducers.movavg.models.MovAvgModelBuilder;
+import org.elasticsearch.search.aggregations.reducers.movavg.models.SimpleModel;
+import org.elasticsearch.search.aggregations.reducers.movavg.models.SingleExpModel;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.hamcrest.Matchers;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.avg;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.filter;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.histogram;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.max;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.min;
+import static org.elasticsearch.search.aggregations.AggregationBuilders.range;
+import static org.elasticsearch.search.aggregations.reducers.ReducerBuilders.movingAvg;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchResponse;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.core.IsNull.notNullValue;
+import static org.hamcrest.core.IsNull.nullValue;
+
+@ElasticsearchIntegrationTest.SuiteScopeTest
+public class MovAvgTests extends ElasticsearchIntegrationTest {
+
+    private static final String INTERVAL_FIELD = "l_value";
+    private static final String VALUE_FIELD = "v_value";
+    private static final String GAP_FIELD = "g_value";
+
+    static int interval;
+    static int numBuckets;
+    static int windowSize;
+    static double alpha;
+    static double beta;
+    static BucketHelpers.GapPolicy gapPolicy;
+    static ValuesSourceMetricsAggregationBuilder metric;
+    static List<ReducerHelperTests.MockBucket> mockHisto;
+
+    static Map<String, ArrayList<Double>> testValues;
+
+
+    enum MovAvgType {
+        SIMPLE("simple"), LINEAR("linear"), SINGLE("single"), DOUBLE("double");
+
+        private final String name;
+
+        MovAvgType(String s) {
+            name = s;
+        }
+
+        public String toString() {
+            return name;
+        }
+    }
+
+    enum MetricTarget {
+        VALUE("value"), COUNT("count");
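+        // VALUE drives the moving average from the metric sub-aggregation (buckets path "the_metric"),
+        // while COUNT drives it from each bucket's own doc count (buckets path "_count").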
private final String name;
+
+        MetricTarget(String s) {
+            name = s;
+        }
+
+        public String toString() {
+            return name;
+        }
+    }
+
+
+    @Override
+    public void setupSuiteScopeCluster() throws Exception {
+        createIndex("idx");
+        createIndex("idx_unmapped");
+        List<IndexRequestBuilder> builders = new ArrayList<>();
+
+
+        interval = 5;
+        numBuckets = randomIntBetween(6, 80);
+        windowSize = randomIntBetween(3, 10);
+        alpha = randomDouble();
+        beta = randomDouble();
+
+        gapPolicy = randomBoolean() ? BucketHelpers.GapPolicy.SKIP : BucketHelpers.GapPolicy.INSERT_ZEROS;
+        metric = randomMetric("the_metric", VALUE_FIELD);
+        mockHisto = ReducerHelperTests.generateHistogram(interval, numBuckets, randomDouble(), randomDouble());
+
+        testValues = new HashMap<>(8);
+
+        for (MovAvgType type : MovAvgType.values()) {
+            for (MetricTarget target : MetricTarget.values()) {
+                setupExpected(type, target);
+            }
+        }
+
+        for (ReducerHelperTests.MockBucket mockBucket : mockHisto) {
+            for (double value : mockBucket.docValues) {
+                builders.add(client().prepareIndex("idx", "type").setSource(jsonBuilder().startObject()
+                        .field(INTERVAL_FIELD, mockBucket.key)
+                        .field(VALUE_FIELD, value).endObject()));
+            }
+        }
+
+        // Used for specially crafted gap tests
+        builders.add(client().prepareIndex("idx", "gap_type").setSource(jsonBuilder().startObject()
+                .field(INTERVAL_FIELD, 0)
+                .field(GAP_FIELD, 1).endObject()));
+
+        builders.add(client().prepareIndex("idx", "gap_type").setSource(jsonBuilder().startObject()
+                .field(INTERVAL_FIELD, 49)
+                .field(GAP_FIELD, 1).endObject()));
+
+        indexRandom(true, builders);
+        ensureSearchable();
+    }
+
+    /**
+     * Calculates the moving averages for a specific (model, target) tuple based on the previously generated mock histogram.
+     * Computed values are stored in the testValues map.
+     *
+     * @param type   The moving average model to use
+     * @param target The metric target: either the bucket doc count (_count) or the metric value
+     */
+    private void setupExpected(MovAvgType type, MetricTarget target) {
+        ArrayList<Double> values = new ArrayList<>(numBuckets);
+        EvictingQueue<Double> window = EvictingQueue.create(windowSize);
+
+        for (ReducerHelperTests.MockBucket mockBucket : mockHisto) {
+            double metricValue;
+            double[] docValues = mockBucket.docValues;
+
+            // Gaps only apply to metric values, not doc _counts
+            if (mockBucket.count == 0 && target.equals(MetricTarget.VALUE)) {
+                // If there was a gap in doc counts and we are skipping gaps, just skip this bucket
+                if (gapPolicy.equals(BucketHelpers.GapPolicy.SKIP)) {
+                    values.add(null);
+                    continue;
+                } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) {
+                    // otherwise insert a zero instead of the true value
+                    metricValue = 0.0;
+                } else {
+                    metricValue = ReducerHelperTests.calculateMetric(docValues, metric);
+                }
+
+            } else {
+                // If this isn't a gap, or is a _count, just insert the value
+                metricValue = target.equals(MetricTarget.VALUE) ?
ReducerHelperTests.calculateMetric(docValues, metric) : mockBucket.count;
+            }
+
+            window.offer(metricValue);
+            switch (type) {
+                case SIMPLE:
+                    values.add(simple(window));
+                    break;
+                case LINEAR:
+                    values.add(linear(window));
+                    break;
+                case SINGLE:
+                    values.add(singleExp(window));
+                    break;
+                case DOUBLE:
+                    values.add(doubleExp(window));
+                    break;
+            }
+
+        }
+        testValues.put(type.toString() + "_" + target.toString(), values);
+    }
+
+    /**
+     * Simple, unweighted moving average
+     *
+     * @param window Window of values to compute movavg for
+     * @return The simple moving average of the window
+     */
+    private double simple(Collection<Double> window) {
+        double movAvg = 0;
+        for (double value : window) {
+            movAvg += value;
+        }
+        movAvg /= window.size();
+        return movAvg;
+    }
+
+    /**
+     * Linearly weighted moving avg
+     *
+     * @param window Window of values to compute movavg for
+     * @return The linearly weighted moving average of the window
+     */
+    private double linear(Collection<Double> window) {
+        double avg = 0;
+        long totalWeight = 1;
+        long current = 1;
+
+        for (double value : window) {
+            avg += value * current;
+            totalWeight += current;
+            current += 1;
+        }
+        return avg / totalWeight;
+    }
+
+    /**
+     * Single exponential moving avg
+     *
+     * @param window Window of values to compute movavg for
+     * @return The exponentially weighted moving average of the window
+     */
+    private double singleExp(Collection<Double> window) {
+        double avg = 0;
+        boolean first = true;
+
+        for (double value : window) {
+            if (first) {
+                avg = value;
+                first = false;
+            } else {
+                avg = (value * alpha) + (avg * (1 - alpha));
+            }
+        }
+        return avg;
+    }
+
+    /**
+     * Double exponential (Holt) moving avg, tracking a smoothed level s and a trend b:
+     * s_t = alpha * x_t + (1 - alpha) * (s_(t-1) + b_(t-1))
+     * b_t = beta * (s_t - s_(t-1)) + (1 - beta) * b_(t-1)
+     *
+     * @param window Window of values to compute movavg for
+     * @return The double exponentially smoothed value of the window
+     */
+    private double doubleExp(Collection<Double> window) {
+        double s = 0;
+        double last_s = 0;
+
+        // Trend value
+        double b = 0;
+        double last_b = 0;
+
+        int counter = 0;
+
+        double last;
+        for (double value : window) {
+            last = value;
+            if (counter == 1) {
+                s = value;
+                b = value - last;
+            } else {
+                s = alpha * value + (1.0d - alpha) * (last_s + last_b);
+                b = beta * (s - last_s) + (1 - beta) * last_b;
+            }
+
+            counter += 1;
+            last_s = s;
+            last_b = b;
+        }
+
+        // Zero-step-ahead forecast: only the smoothed level contributes
+        return s + (0 * b);
+    }
+
+
+
+
+    /**
+     * test simple moving average on single value field
+     */
+    @Test
+    @AwaitsFix(bugUrl = "Fails with certain seeds including -Dtests.seed=D9EF60095522804F")
+    public void simpleSingleValuedField() {
+
+        SearchResponse response = client()
+                .prepareSearch("idx").setTypes("type")
+                .addAggregation(
+                        histogram("histo").field(INTERVAL_FIELD).interval(interval)
+                                .extendedBounds(0L, (long) (interval * (numBuckets - 1)))
+                                .subAggregation(metric)
+                                .subAggregation(movingAvg("movavg_counts")
+                                        .window(windowSize)
+                                        .modelBuilder(new SimpleModel.SimpleModelBuilder())
+                                        .gapPolicy(gapPolicy)
+                                        .setBucketsPaths("_count"))
+                                .subAggregation(movingAvg("movavg_values")
+                                        .window(windowSize)
+                                        .modelBuilder(new SimpleModel.SimpleModelBuilder())
+                                        .gapPolicy(gapPolicy)
+                                        .setBucketsPaths("the_metric"))
+                ).execute().actionGet();
+
+        assertSearchResponse(response);
+
+        InternalHistogram histo = response.getAggregations().get("histo");
+        assertThat(histo, notNullValue());
+        assertThat(histo.getName(), equalTo("histo"));
+        List<? extends Bucket> buckets = histo.getBuckets();
+        assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(mockHisto.size()));
+
+        List<Double> expectedCounts = testValues.get(MovAvgType.SIMPLE.toString() + "_" + MetricTarget.COUNT.toString());
+        List<Double> expectedValues = testValues.get(MovAvgType.SIMPLE.toString() + "_" + MetricTarget.VALUE.toString());
+
+        Iterator<? extends Bucket> actualIter = buckets.iterator();
+        Iterator<ReducerHelperTests.MockBucket> expectedBucketIter = mockHisto.iterator();
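+        // Walk the actual histogram buckets, the mock buckets, and the precomputed expected
+        // movavg values in lock step; assertValidIterators() fails fast on any size mismatch.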
Iterator expectedCountsIter = expectedCounts.iterator(); + Iterator expectedValuesIter = expectedValues.iterator(); + + while (actualIter.hasNext()) { + assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter); + + Histogram.Bucket actual = actualIter.next(); + ReducerHelperTests.MockBucket expected = expectedBucketIter.next(); + Double expectedCount = expectedCountsIter.next(); + Double expectedValue = expectedValuesIter.next(); + + assertThat("keys do not match", ((Number) actual.getKey()).longValue(), equalTo(expected.key)); + assertThat("doc counts do not match", actual.getDocCount(), equalTo((long)expected.count)); + + assertBucketContents(actual, expectedCount, expectedValue); + } + } + + @Test + @AwaitsFix(bugUrl = "Fails with certain seeds including -Dtests.seed=D9EF60095522804F") + public void linearSingleValuedField() { + + SearchResponse response = client() + .prepareSearch("idx").setTypes("type") + .addAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(interval) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(metric) + .subAggregation(movingAvg("movavg_counts") + .window(windowSize) + .modelBuilder(new LinearModel.LinearModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("_count")) + .subAggregation(movingAvg("movavg_values") + .window(windowSize) + .modelBuilder(new LinearModel.LinearModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_metric")) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(mockHisto.size())); + + List expectedCounts = testValues.get(MovAvgType.LINEAR.toString() + "_" + MetricTarget.COUNT.toString()); + List expectedValues = testValues.get(MovAvgType.LINEAR.toString() + "_" + MetricTarget.VALUE.toString()); + + Iterator actualIter = buckets.iterator(); + Iterator expectedBucketIter = mockHisto.iterator(); + Iterator expectedCountsIter = expectedCounts.iterator(); + Iterator expectedValuesIter = expectedValues.iterator(); + + while (actualIter.hasNext()) { + assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter); + + Histogram.Bucket actual = actualIter.next(); + ReducerHelperTests.MockBucket expected = expectedBucketIter.next(); + Double expectedCount = expectedCountsIter.next(); + Double expectedValue = expectedValuesIter.next(); + + assertThat("keys do not match", ((Number) actual.getKey()).longValue(), equalTo(expected.key)); + assertThat("doc counts do not match", actual.getDocCount(), equalTo((long)expected.count)); + + assertBucketContents(actual, expectedCount, expectedValue); + } + } + + @Test + @AwaitsFix(bugUrl = "Fails with certain seeds including -Dtests.seed=D9EF60095522804F") + public void singleSingleValuedField() { + + SearchResponse response = client() + .prepareSearch("idx").setTypes("type") + .addAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(interval) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(metric) + .subAggregation(movingAvg("movavg_counts") + .window(windowSize) + .modelBuilder(new SingleExpModel.SingleExpModelBuilder().alpha(alpha)) + .gapPolicy(gapPolicy) + .setBucketsPaths("_count")) + .subAggregation(movingAvg("movavg_values") + .window(windowSize) + .modelBuilder(new 
SingleExpModel.SingleExpModelBuilder().alpha(alpha)) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_metric")) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(mockHisto.size())); + + List expectedCounts = testValues.get(MovAvgType.SINGLE.toString() + "_" + MetricTarget.COUNT.toString()); + List expectedValues = testValues.get(MovAvgType.SINGLE.toString() + "_" + MetricTarget.VALUE.toString()); + + Iterator actualIter = buckets.iterator(); + Iterator expectedBucketIter = mockHisto.iterator(); + Iterator expectedCountsIter = expectedCounts.iterator(); + Iterator expectedValuesIter = expectedValues.iterator(); + + while (actualIter.hasNext()) { + assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter); + + Histogram.Bucket actual = actualIter.next(); + ReducerHelperTests.MockBucket expected = expectedBucketIter.next(); + Double expectedCount = expectedCountsIter.next(); + Double expectedValue = expectedValuesIter.next(); + + assertThat("keys do not match", ((Number) actual.getKey()).longValue(), equalTo(expected.key)); + assertThat("doc counts do not match", actual.getDocCount(), equalTo((long)expected.count)); + + assertBucketContents(actual, expectedCount, expectedValue); + } + } + + @Test + @AwaitsFix(bugUrl = "Fails with certain seeds including -Dtests.seed=D9EF60095522804F") + public void doubleSingleValuedField() { + + SearchResponse response = client() + .prepareSearch("idx").setTypes("type") + .addAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(interval) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(metric) + .subAggregation(movingAvg("movavg_counts") + .window(windowSize) + .modelBuilder(new DoubleExpModel.DoubleExpModelBuilder().alpha(alpha).beta(beta)) + .gapPolicy(gapPolicy) + .setBucketsPaths("_count")) + .subAggregation(movingAvg("movavg_values") + .window(windowSize) + .modelBuilder(new DoubleExpModel.DoubleExpModelBuilder().alpha(alpha).beta(beta)) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_metric")) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(mockHisto.size())); + + List expectedCounts = testValues.get(MovAvgType.DOUBLE.toString() + "_" + MetricTarget.COUNT.toString()); + List expectedValues = testValues.get(MovAvgType.DOUBLE.toString() + "_" + MetricTarget.VALUE.toString()); + + Iterator actualIter = buckets.iterator(); + Iterator expectedBucketIter = mockHisto.iterator(); + Iterator expectedCountsIter = expectedCounts.iterator(); + Iterator expectedValuesIter = expectedValues.iterator(); + + while (actualIter.hasNext()) { + assertValidIterators(expectedBucketIter, expectedCountsIter, expectedValuesIter); + + Histogram.Bucket actual = actualIter.next(); + ReducerHelperTests.MockBucket expected = expectedBucketIter.next(); + Double expectedCount = expectedCountsIter.next(); + Double expectedValue = expectedValuesIter.next(); + + assertThat("keys do not match", ((Number) actual.getKey()).longValue(), equalTo(expected.key)); + 
assertThat("doc counts do not match", actual.getDocCount(), equalTo((long)expected.count)); + + assertBucketContents(actual, expectedCount, expectedValue); + } + } + + @Test + public void testSizeZeroWindow() { + try { + client() + .prepareSearch("idx").setTypes("type") + .addAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(interval) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(randomMetric("the_metric", VALUE_FIELD)) + .subAggregation(movingAvg("movavg_counts") + .window(0) + .modelBuilder(new SimpleModel.SimpleModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_metric")) + ).execute().actionGet(); + fail("MovingAvg should not accept a window that is zero"); + + } catch (SearchPhaseExecutionException exception) { + // All good + } + } + + @Test + public void testBadParent() { + try { + client() + .prepareSearch("idx").setTypes("type") + .addAggregation( + range("histo").field(INTERVAL_FIELD).addRange(0, 10) + .subAggregation(randomMetric("the_metric", VALUE_FIELD)) + .subAggregation(movingAvg("movavg_counts") + .window(0) + .modelBuilder(new SimpleModel.SimpleModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_metric")) + ).execute().actionGet(); + fail("MovingAvg should not accept non-histogram as parent"); + + } catch (SearchPhaseExecutionException exception) { + // All good + } + } + + @Test + public void testNegativeWindow() { + try { + client() + .prepareSearch("idx").setTypes("type") + .addAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(interval) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(randomMetric("the_metric", VALUE_FIELD)) + .subAggregation(movingAvg("movavg_counts") + .window(-10) + .modelBuilder(new SimpleModel.SimpleModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("_count")) + ).execute().actionGet(); + fail("MovingAvg should not accept a window that is negative"); + + } catch (SearchPhaseExecutionException exception) { + //Throwable rootCause = exception.unwrapCause(); + //assertThat(rootCause, instanceOf(SearchParseException.class)); + //assertThat("[window] value must be a positive, non-zero integer. 
Value supplied was [0] in [movingAvg].", equalTo(exception.getMessage())); + } + } + + @Test + public void testNoBucketsInHistogram() { + + SearchResponse response = client() + .prepareSearch("idx").setTypes("type") + .addAggregation( + histogram("histo").field("test").interval(interval) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(randomMetric("the_metric", VALUE_FIELD)) + .subAggregation(movingAvg("movavg_counts") + .window(windowSize) + .modelBuilder(new SimpleModel.SimpleModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_metric")) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(0)); + } + + @Test + public void testNoBucketsInHistogramWithPredict() { + int numPredictions = randomIntBetween(1,10); + SearchResponse response = client() + .prepareSearch("idx").setTypes("type") + .addAggregation( + histogram("histo").field("test").interval(interval) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(randomMetric("the_metric", VALUE_FIELD)) + .subAggregation(movingAvg("movavg_counts") + .window(windowSize) + .modelBuilder(new SimpleModel.SimpleModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_metric") + .predict(numPredictions)) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat(buckets.size(), equalTo(0)); + } + + @Test + public void testZeroPrediction() { + try { + client() + .prepareSearch("idx").setTypes("type") + .addAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(interval) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(randomMetric("the_metric", VALUE_FIELD)) + .subAggregation(movingAvg("movavg_counts") + .window(windowSize) + .modelBuilder(randomModelBuilder()) + .gapPolicy(gapPolicy) + .predict(0) + .setBucketsPaths("the_metric")) + ).execute().actionGet(); + fail("MovingAvg should not accept a prediction size that is zero"); + + } catch (SearchPhaseExecutionException exception) { + // All Good + } + } + + @Test + public void testNegativePrediction() { + try { + client() + .prepareSearch("idx").setTypes("type") + .addAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(interval) + .extendedBounds(0L, (long) (interval * (numBuckets - 1))) + .subAggregation(randomMetric("the_metric", VALUE_FIELD)) + .subAggregation(movingAvg("movavg_counts") + .window(windowSize) + .modelBuilder(randomModelBuilder()) + .gapPolicy(gapPolicy) + .predict(-10) + .setBucketsPaths("the_metric")) + ).execute().actionGet(); + fail("MovingAvg should not accept a prediction size that is negative"); + + } catch (SearchPhaseExecutionException exception) { + // All Good + } + } + + /** + * This test uses the "gap" dataset, which is simply a doc at the beginning and end of + * the INTERVAL_FIELD range. These docs have a value of 1 in GAP_FIELD. 
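+     * With interval 1 and extended bounds 0-49, only buckets 0 and 49 hold a document; buckets
+     * 1-48 are empty, so SKIP leaves their metric values null while INSERT_ZEROS feeds 0.0 into
+     * the moving average window.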
+ * This test verifies that large gaps don't break things, and that the mov avg roughly works + * in the correct manner (checks direction of change, but not actual values) + */ + @Test + public void testGiantGap() { + + SearchResponse response = client() + .prepareSearch("idx").setTypes("gap_type") + .addAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L) + .subAggregation(min("the_metric").field(GAP_FIELD)) + .subAggregation(movingAvg("movavg_values") + .window(windowSize) + .modelBuilder(randomModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_metric")) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50)); + + double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movavg_values"))).value(); + assertThat(Double.compare(lastValue, 0.0d), greaterThanOrEqualTo(0)); + + double currentValue; + for (int i = 1; i < 49; i++) { + SimpleValue current = buckets.get(i).getAggregations().get("movavg_values"); + if (current != null) { + currentValue = current.value(); + + // Since there are only two values in this test, at the beginning and end, the moving average should + // decrease every step (until it reaches zero). Crude way to check that it's doing the right thing + // without actually verifying the computed values. Should work for all types of moving avgs and + // gap policies + assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); + lastValue = currentValue; + } + } + + + SimpleValue current = buckets.get(49).getAggregations().get("movavg_values"); + assertThat(current, notNullValue()); + currentValue = current.value(); + + if (gapPolicy.equals(BucketHelpers.GapPolicy.SKIP)) { + // if we are ignoring, movavg could go up (double_exp) or stay the same (simple, linear, single_exp) + assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); + } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { + // If we insert zeros, this should always increase the moving avg since the last bucket has a real value + assertThat(Double.compare(lastValue, currentValue), equalTo(-1)); + } + } + + /** + * Big gap, but with prediction at the end. 
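+     * The predicted buckets are appended after the last real bucket (indices 50 through
+     * 49 + numPredictions) and carry only the movavg value, never the_metric.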
+ */ + @Test + public void testGiantGapWithPredict() { + int numPredictions = randomIntBetween(1, 10); + + SearchResponse response = client() + .prepareSearch("idx").setTypes("gap_type") + .addAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L) + .subAggregation(min("the_metric").field(GAP_FIELD)) + .subAggregation(movingAvg("movavg_values") + .window(windowSize) + .modelBuilder(randomModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_metric") + .predict(numPredictions)) + ).execute().actionGet(); + + assertSearchResponse(response); + + InternalHistogram histo = response.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50 + numPredictions)); + + double lastValue = ((SimpleValue)(buckets.get(0).getAggregations().get("movavg_values"))).value(); + assertThat(Double.compare(lastValue, 0.0d), greaterThanOrEqualTo(0)); + + double currentValue; + for (int i = 1; i < 49; i++) { + SimpleValue current = buckets.get(i).getAggregations().get("movavg_values"); + if (current != null) { + currentValue = current.value(); + + // Since there are only two values in this test, at the beginning and end, the moving average should + // decrease every step (until it reaches zero). Crude way to check that it's doing the right thing + // without actually verifying the computed values. Should work for all types of moving avgs and + // gap policies + assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); + lastValue = currentValue; + } + } + + SimpleValue current = buckets.get(49).getAggregations().get("movavg_values"); + assertThat(current, notNullValue()); + currentValue = current.value(); + + if (gapPolicy.equals(BucketHelpers.GapPolicy.SKIP)) { + // if we are ignoring, movavg could go up (double_exp) or stay the same (simple, linear, single_exp) + assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); + } else if (gapPolicy.equals(BucketHelpers.GapPolicy.INSERT_ZEROS)) { + // If we insert zeros, this should always increase the moving avg since the last bucket has a real value + assertThat(Double.compare(lastValue, currentValue), equalTo(-1)); + } + + // Now check predictions + for (int i = 50; i < 50 + numPredictions; i++) { + // Unclear at this point which direction the predictions will go, just verify they are + // not null, and that we don't have the_metric anymore + assertThat((buckets.get(i).getAggregations().get("movavg_values")), notNullValue()); + assertThat((buckets.get(i).getAggregations().get("the_metric")), nullValue()); + } + } + + /** + * This test filters the "gap" data so that the first doc is excluded. This leaves a long stretch of empty + * buckets until the final bucket. The moving avg should be zero up until the last bucket, and should work + * regardless of mov avg type or gap policy. 
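+     * (The range filter from(1) excludes the doc at key 0, so only the doc at key 49 survives.)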
+ */ + @Test + public void testLeftGap() { + SearchResponse response = client() + .prepareSearch("idx").setTypes("gap_type") + .addAggregation( + filter("filtered").filter(new RangeFilterBuilder(INTERVAL_FIELD).from(1)).subAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L) + .subAggregation(randomMetric("the_metric", GAP_FIELD)) + .subAggregation(movingAvg("movavg_values") + .window(windowSize) + .modelBuilder(randomModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_metric")) + )) + .execute().actionGet(); + + assertSearchResponse(response); + + InternalFilter filtered = response.getAggregations().get("filtered"); + assertThat(filtered, notNullValue()); + assertThat(filtered.getName(), equalTo("filtered")); + + InternalHistogram histo = filtered.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50)); + + double lastValue = 0; + + double currentValue; + for (int i = 0; i < 50; i++) { + SimpleValue current = buckets.get(i).getAggregations().get("movavg_values"); + if (current != null) { + currentValue = current.value(); + + assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); + lastValue = currentValue; + } + } + } + + @Test + public void testLeftGapWithPredict() { + int numPredictions = randomIntBetween(1, 10); + SearchResponse response = client() + .prepareSearch("idx").setTypes("gap_type") + .addAggregation( + filter("filtered").filter(new RangeFilterBuilder(INTERVAL_FIELD).from(1)).subAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L) + .subAggregation(randomMetric("the_metric", GAP_FIELD)) + .subAggregation(movingAvg("movavg_values") + .window(windowSize) + .modelBuilder(randomModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_metric") + .predict(numPredictions)) + )) + .execute().actionGet(); + + assertSearchResponse(response); + + InternalFilter filtered = response.getAggregations().get("filtered"); + assertThat(filtered, notNullValue()); + assertThat(filtered.getName(), equalTo("filtered")); + + InternalHistogram histo = filtered.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50 + numPredictions)); + + double lastValue = 0; + + double currentValue; + for (int i = 0; i < 50; i++) { + SimpleValue current = buckets.get(i).getAggregations().get("movavg_values"); + if (current != null) { + currentValue = current.value(); + + assertThat(Double.compare(lastValue, currentValue), lessThanOrEqualTo(0)); + lastValue = currentValue; + } + } + + // Now check predictions + for (int i = 50; i < 50 + numPredictions; i++) { + // Unclear at this point which direction the predictions will go, just verify they are + // not null, and that we don't have the_metric anymore + assertThat((buckets.get(i).getAggregations().get("movavg_values")), notNullValue()); + assertThat((buckets.get(i).getAggregations().get("the_metric")), nullValue()); + } + } + + /** + * This test filters the "gap" data so that the last doc is excluded. This leaves a long stretch of empty + * buckets after the first bucket. The moving avg should be one at the beginning, then zero for the rest + * regardless of mov avg type or gap policy. 
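+     * (The range filter to(1) excludes the doc at key 49, so only the doc at key 0 survives.)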
+ */ + @Test + public void testRightGap() { + SearchResponse response = client() + .prepareSearch("idx").setTypes("gap_type") + .addAggregation( + filter("filtered").filter(new RangeFilterBuilder(INTERVAL_FIELD).to(1)).subAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L) + .subAggregation(randomMetric("the_metric", GAP_FIELD)) + .subAggregation(movingAvg("movavg_values") + .window(windowSize) + .modelBuilder(randomModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_metric")) + )) + .execute().actionGet(); + + assertSearchResponse(response); + + InternalFilter filtered = response.getAggregations().get("filtered"); + assertThat(filtered, notNullValue()); + assertThat(filtered.getName(), equalTo("filtered")); + + InternalHistogram histo = filtered.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50)); + + + SimpleValue current = buckets.get(0).getAggregations().get("movavg_values"); + assertThat(current, notNullValue()); + + double lastValue = current.value(); + + double currentValue; + for (int i = 1; i < 50; i++) { + current = buckets.get(i).getAggregations().get("movavg_values"); + if (current != null) { + currentValue = current.value(); + + assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); + lastValue = currentValue; + } + } + } + + @Test + public void testRightGapWithPredict() { + int numPredictions = randomIntBetween(1, 10); + SearchResponse response = client() + .prepareSearch("idx").setTypes("gap_type") + .addAggregation( + filter("filtered").filter(new RangeFilterBuilder(INTERVAL_FIELD).to(1)).subAggregation( + histogram("histo").field(INTERVAL_FIELD).interval(1).extendedBounds(0L, 49L) + .subAggregation(randomMetric("the_metric", GAP_FIELD)) + .subAggregation(movingAvg("movavg_values") + .window(windowSize) + .modelBuilder(randomModelBuilder()) + .gapPolicy(gapPolicy) + .setBucketsPaths("the_metric") + .predict(numPredictions)) + )) + .execute().actionGet(); + + assertSearchResponse(response); + + InternalFilter filtered = response.getAggregations().get("filtered"); + assertThat(filtered, notNullValue()); + assertThat(filtered.getName(), equalTo("filtered")); + + InternalHistogram histo = filtered.getAggregations().get("histo"); + assertThat(histo, notNullValue()); + assertThat(histo.getName(), equalTo("histo")); + List buckets = histo.getBuckets(); + assertThat("Size of buckets array is not correct.", buckets.size(), equalTo(50 + numPredictions)); + + + SimpleValue current = buckets.get(0).getAggregations().get("movavg_values"); + assertThat(current, notNullValue()); + + double lastValue = current.value(); + + double currentValue; + for (int i = 1; i < 50; i++) { + current = buckets.get(i).getAggregations().get("movavg_values"); + if (current != null) { + currentValue = current.value(); + + assertThat(Double.compare(lastValue, currentValue), greaterThanOrEqualTo(0)); + lastValue = currentValue; + } + } + + // Now check predictions + for (int i = 50; i < 50 + numPredictions; i++) { + // Unclear at this point which direction the predictions will go, just verify they are + // not null, and that we don't have the_metric anymore + assertThat((buckets.get(i).getAggregations().get("movavg_values")), notNullValue()); + assertThat((buckets.get(i).getAggregations().get("the_metric")), nullValue()); + } + } + + + private void 
assertValidIterators(Iterator expectedBucketIter, Iterator expectedCountsIter, Iterator expectedValuesIter) { + if (!expectedBucketIter.hasNext()) { + fail("`expectedBucketIter` iterator ended before `actual` iterator, size mismatch"); + } + if (!expectedCountsIter.hasNext()) { + fail("`expectedCountsIter` iterator ended before `actual` iterator, size mismatch"); + } + if (!expectedValuesIter.hasNext()) { + fail("`expectedValuesIter` iterator ended before `actual` iterator, size mismatch"); + } + } + + private void assertBucketContents(Histogram.Bucket actual, Double expectedCount, Double expectedValue) { + // This is a gap bucket + SimpleValue countMovAvg = actual.getAggregations().get("movavg_counts"); + if (expectedCount == null) { + assertThat("[_count] movavg is not null", countMovAvg, nullValue()); + } else { + assertThat("[_count] movavg is null", countMovAvg, notNullValue()); + assertThat("[_count] movavg does not match expected ["+countMovAvg.value()+" vs "+expectedCount+"]", + Math.abs(countMovAvg.value() - expectedCount) <= 0.000001, equalTo(true)); + } + + // This is a gap bucket + SimpleValue valuesMovAvg = actual.getAggregations().get("movavg_values"); + if (expectedValue == null) { + assertThat("[value] movavg is not null", valuesMovAvg, Matchers.nullValue()); + } else { + assertThat("[value] movavg is null", valuesMovAvg, notNullValue()); + assertThat("[value] movavg does not match expected ["+valuesMovAvg.value()+" vs "+expectedValue+"]", Math.abs(valuesMovAvg.value() - expectedValue) <= 0.000001, equalTo(true)); + } + } + + private MovAvgModelBuilder randomModelBuilder() { + int rand = randomIntBetween(0,3); + + switch (rand) { + case 0: + return new SimpleModel.SimpleModelBuilder(); + case 1: + return new LinearModel.LinearModelBuilder(); + case 2: + return new SingleExpModel.SingleExpModelBuilder().alpha(alpha); + case 3: + return new DoubleExpModel.DoubleExpModelBuilder().alpha(alpha).beta(beta); + default: + return new SimpleModel.SimpleModelBuilder(); + } + } + + private ValuesSourceMetricsAggregationBuilder randomMetric(String name, String field) { + int rand = randomIntBetween(0,3); + + switch (rand) { + case 0: + return min(name).field(field); + case 2: + return max(name).field(field); + case 3: + return avg(name).field(field); + default: + return avg(name).field(field); + } + } + +} diff --git a/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgUnitTests.java b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgUnitTests.java new file mode 100644 index 00000000000..156f4f873a7 --- /dev/null +++ b/src/test/java/org/elasticsearch/search/aggregations/reducers/moving/avg/MovAvgUnitTests.java @@ -0,0 +1,297 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.search.aggregations.reducers.moving.avg; + +import com.google.common.collect.EvictingQueue; +import org.elasticsearch.search.aggregations.reducers.movavg.models.*; +import org.elasticsearch.test.ElasticsearchTestCase; +import static org.hamcrest.Matchers.equalTo; +import org.junit.Test; + +public class MovAvgUnitTests extends ElasticsearchTestCase { + + @Test + public void testSimpleMovAvgModel() { + MovAvgModel model = new SimpleModel(); + + int numValues = randomIntBetween(1, 100); + int windowSize = randomIntBetween(1, 50); + + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < numValues; i++) { + + double randValue = randomDouble(); + double expected = 0; + + window.offer(randValue); + + for (double value : window) { + expected += value; + } + expected /= window.size(); + + double actual = model.next(window); + assertThat(Double.compare(expected, actual), equalTo(0)); + } + } + + @Test + public void testSimplePredictionModel() { + MovAvgModel model = new SimpleModel(); + + int windowSize = randomIntBetween(1, 50); + int numPredictions = randomIntBetween(1,50); + + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < windowSize; i++) { + window.offer(randomDouble()); + } + double actual[] = model.predict(window, numPredictions); + + double expected[] = new double[numPredictions]; + for (int i = 0; i < numPredictions; i++) { + for (double value : window) { + expected[i] += value; + } + expected[i] /= window.size(); + window.offer(expected[i]); + } + + for (int i = 0; i < numPredictions; i++) { + assertThat(Double.compare(expected[i], actual[i]), equalTo(0)); + } + } + + @Test + public void testLinearMovAvgModel() { + MovAvgModel model = new LinearModel(); + + int numValues = randomIntBetween(1, 100); + int windowSize = randomIntBetween(1, 50); + + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < numValues; i++) { + double randValue = randomDouble(); + window.offer(randValue); + + double avg = 0; + long totalWeight = 1; + long current = 1; + + for (double value : window) { + avg += value * current; + totalWeight += current; + current += 1; + } + double expected = avg / totalWeight; + double actual = model.next(window); + assertThat(Double.compare(expected, actual), equalTo(0)); + } + } + + @Test + public void testLinearPredictionModel() { + MovAvgModel model = new LinearModel(); + + int windowSize = randomIntBetween(1, 50); + int numPredictions = randomIntBetween(1,50); + + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < windowSize; i++) { + window.offer(randomDouble()); + } + double actual[] = model.predict(window, numPredictions); + double expected[] = new double[numPredictions]; + + for (int i = 0; i < numPredictions; i++) { + double avg = 0; + long totalWeight = 1; + long current = 1; + + for (double value : window) { + avg += value * current; + totalWeight += current; + current += 1; + } + expected[i] = avg / totalWeight; + window.offer(expected[i]); + } + + for (int i = 0; i < numPredictions; i++) { + assertThat(Double.compare(expected[i], actual[i]), equalTo(0)); + } + } + + @Test + public void testSingleExpMovAvgModel() { + double alpha = randomDouble(); + MovAvgModel model = new SingleExpModel(alpha); + + int numValues = randomIntBetween(1, 100); + int windowSize = randomIntBetween(1, 50); + + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < numValues; i++) { + double randValue = randomDouble(); + 
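+            // Offer the new value, then recompute the expected EWMA by hand:
+            // avg = alpha * value + (1 - alpha) * avg, seeded with the oldest value in the window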
window.offer(randValue); + + double avg = 0; + boolean first = true; + + for (double value : window) { + if (first) { + avg = value; + first = false; + } else { + avg = (value * alpha) + (avg * (1 - alpha)); + } + } + double expected = avg; + double actual = model.next(window); + assertThat(Double.compare(expected, actual), equalTo(0)); + } + } + + @Test + public void testSinglePredictionModel() { + double alpha = randomDouble(); + MovAvgModel model = new SingleExpModel(alpha); + + int windowSize = randomIntBetween(1, 50); + int numPredictions = randomIntBetween(1,50); + + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < windowSize; i++) { + window.offer(randomDouble()); + } + double actual[] = model.predict(window, numPredictions); + double expected[] = new double[numPredictions]; + + for (int i = 0; i < numPredictions; i++) { + double avg = 0; + boolean first = true; + + for (double value : window) { + if (first) { + avg = value; + first = false; + } else { + avg = (value * alpha) + (avg * (1 - alpha)); + } + } + expected[i] = avg; + window.offer(expected[i]); + } + + for (int i = 0; i < numPredictions; i++) { + assertThat(Double.compare(expected[i], actual[i]), equalTo(0)); + } + } + + @Test + public void testDoubleExpMovAvgModel() { + double alpha = randomDouble(); + double beta = randomDouble(); + MovAvgModel model = new DoubleExpModel(alpha, beta); + + int numValues = randomIntBetween(1, 100); + int windowSize = randomIntBetween(1, 50); + + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < numValues; i++) { + double randValue = randomDouble(); + window.offer(randValue); + + double s = 0; + double last_s = 0; + + // Trend value + double b = 0; + double last_b = 0; + int counter = 0; + + double last; + for (double value : window) { + last = value; + if (counter == 1) { + s = value; + b = value - last; + } else { + s = alpha * value + (1.0d - alpha) * (last_s + last_b); + b = beta * (s - last_s) + (1 - beta) * last_b; + } + + counter += 1; + last_s = s; + last_b = b; + } + + double expected = s + (0 * b) ; + double actual = model.next(window); + assertThat(Double.compare(expected, actual), equalTo(0)); + } + } + + @Test + public void testDoublePredictionModel() { + double alpha = randomDouble(); + double beta = randomDouble(); + MovAvgModel model = new DoubleExpModel(alpha, beta); + + int windowSize = randomIntBetween(1, 50); + int numPredictions = randomIntBetween(1,50); + + EvictingQueue window = EvictingQueue.create(windowSize); + for (int i = 0; i < windowSize; i++) { + window.offer(randomDouble()); + } + double actual[] = model.predict(window, numPredictions); + double expected[] = new double[numPredictions]; + + double s = 0; + double last_s = 0; + + // Trend value + double b = 0; + double last_b = 0; + int counter = 0; + + double last; + for (double value : window) { + last = value; + if (counter == 1) { + s = value; + b = value - last; + } else { + s = alpha * value + (1.0d - alpha) * (last_s + last_b); + b = beta * (s - last_s) + (1 - beta) * last_b; + } + + counter += 1; + last_s = s; + last_b = b; + } + + for (int i = 0; i < numPredictions; i++) { + expected[i] = s + (i * b); + assertThat(Double.compare(expected[i], actual[i]), equalTo(0)); + } + } +} diff --git a/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java b/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java index a5024920877..f11f72292e7 100644 --- 
a/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java +++ b/src/test/java/org/elasticsearch/search/basic/SearchWhileRelocatingTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.basic; -import com.carrotsearch.randomizedtesting.annotations.Nightly; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -43,6 +42,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTi import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; +@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2) public class SearchWhileRelocatingTests extends ElasticsearchIntegrationTest { // @LuceneTestCase.AwaitsFix(bugUrl = "problem with search searching on 1 shard (no replica), " + diff --git a/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java b/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java index a759fb7f1c3..b4fef4f623d 100644 --- a/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java +++ b/src/test/java/org/elasticsearch/search/basic/SearchWithRandomExceptionsTests.java @@ -41,7 +41,6 @@ import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.test.engine.MockEngineSupport; import org.elasticsearch.test.engine.ThrowingLeafReaderWrapper; import org.elasticsearch.test.junit.annotations.TestLogging; -import org.elasticsearch.test.store.MockDirectoryHelper; import org.elasticsearch.test.store.MockFSDirectoryService; import org.junit.Test; @@ -94,7 +93,7 @@ public class SearchWithRandomExceptionsTests extends ElasticsearchIntegrationTes if (createIndexWithoutErrors) { Builder settings = settingsBuilder() - .put("index.number_of_replicas", randomIntBetween(0, 1)); + .put("index.number_of_replicas", numberOfReplicas()); logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap()); client().admin().indices().prepareCreate("test") .setSettings(settings) @@ -108,15 +107,15 @@ public class SearchWithRandomExceptionsTests extends ElasticsearchIntegrationTes client().admin().indices().prepareFlush("test").setWaitIfOngoing(true).execute().get(); client().admin().indices().prepareClose("test").execute().get(); client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder() - .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE, exceptionRate) - .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, exceptionOnOpenRate)); + .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE, exceptionRate) + .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, exceptionOnOpenRate)); client().admin().indices().prepareOpen("test").execute().get(); } else { Builder settings = settingsBuilder() .put("index.number_of_replicas", randomIntBetween(0, 1)) .put(MockFSDirectoryService.CHECK_INDEX_ON_CLOSE, false) - .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE, exceptionRate) - .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, exceptionOnOpenRate); // we cannot expect that the index will be valid + .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE, exceptionRate) + .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, exceptionOnOpenRate); // we cannot expect that the index will be valid logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap()); 
client().admin().indices().prepareCreate("test") .setSettings(settings) @@ -190,8 +189,8 @@ public class SearchWithRandomExceptionsTests extends ElasticsearchIntegrationTes // check the index still contains the records that we indexed without errors client().admin().indices().prepareClose("test").execute().get(); client().admin().indices().prepareUpdateSettings("test").setSettings(settingsBuilder() - .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE, 0) - .put(MockDirectoryHelper.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, 0)); + .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE, 0) + .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN, 0)); client().admin().indices().prepareOpen("test").execute().get(); ensureGreen(); SearchResponse searchResponse = client().prepareSearch().setTypes("type").setQuery(QueryBuilders.matchQuery("test", "init")).get(); diff --git a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java index bbb4d01f96d..357a19afa60 100644 --- a/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java +++ b/src/test/java/org/elasticsearch/search/child/SimpleChildQuerySearchTests.java @@ -20,13 +20,11 @@ package org.elasticsearch.search.child; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse; import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsResponse; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.count.CountResponse; -import org.elasticsearch.action.deletebyquery.DeleteByQueryResponse; import org.elasticsearch.action.explain.ExplainResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; @@ -40,7 +38,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.cache.filter.FilterCacheModule; import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; +import org.elasticsearch.index.cache.filter.index.IndexFilterCache; import org.elasticsearch.index.fielddata.FieldDataType; import org.elasticsearch.index.mapper.FieldMapper.Loading; import org.elasticsearch.index.mapper.MergeMappingException; @@ -75,9 +73,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicReference; import static com.google.common.collect.Maps.newHashMap; -import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.common.io.Streams.copyToStringFromClasspath; -import static org.elasticsearch.common.settings.ImmutableSettings.builder; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.FilterBuilders.boolFilter; @@ -127,7 +123,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { protected Settings nodeSettings(int nodeOrdinal) { return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) 
// aggressive filter caching so that we can assert on the filter cache size - .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, WeightedFilterCache.class) + .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class) .put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true) .build(); } @@ -188,7 +184,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { @Test // see #6722 - public void test6722() throws ElasticsearchException, IOException { + public void test6722() throws IOException { assertAcked(prepareCreate("test") .addMapping("foo") .addMapping("test", "_parent", "type=foo")); @@ -207,7 +203,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { @Test // see #2744 - public void test2744() throws ElasticsearchException, IOException { + public void test2744() throws IOException { assertAcked(prepareCreate("test") .addMapping("foo") .addMapping("test", "_parent", "type=foo")); @@ -427,15 +423,15 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { for (int i = 1; i <= 10; i++) { logger.info("Round {}", i); SearchResponse searchResponse = client().prepareSearch("test") - .setQuery(constantScoreQuery(queryFilter(topChildrenQuery("child", matchAllQuery())).cache(true))).execute() + .setQuery(constantScoreQuery(queryFilter(topChildrenQuery("child", matchAllQuery())))).execute() .actionGet(); assertNoFailures(searchResponse); searchResponse = client().prepareSearch("test") - .setQuery(constantScoreQuery(queryFilter(hasChildQuery("child", matchAllQuery()).scoreType("max")).cache(true))) + .setQuery(constantScoreQuery(queryFilter(hasChildQuery("child", matchAllQuery()).scoreType("max")))) .get(); assertNoFailures(searchResponse); searchResponse = client().prepareSearch("test") - .setQuery(constantScoreQuery(queryFilter(hasParentQuery("parent", matchAllQuery()).scoreType("score")).cache(true))) + .setQuery(constantScoreQuery(queryFilter(hasParentQuery("parent", matchAllQuery()).scoreType("score")))) .get(); assertNoFailures(searchResponse); } @@ -852,7 +848,8 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { .setQuery(hasChildQuery("child", termQuery("c_field", "1")).scoreType("max")) .get(); assertThat(explainResponse.isExists(), equalTo(true)); - assertThat(explainResponse.getExplanation().toString(), equalTo("1.0 = (MATCH) sum of:\n 1.0 = not implemented yet...\n 0.0 = match on required clause, product of:\n 0.0 = # clause\n 0.0 = (MATCH) Match on id 0\n")); + // TODO: improve test once explanations are actually implemented + assertThat(explainResponse.getExplanation().toString(), startsWith("1.0 =")); } List createDocBuilders() { @@ -1094,41 +1091,6 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest { assertSearchHit(searchResponse, 1, hasId("2")); } - @Test - public void testHasChildAndHasParentWrappedInAQueryFilterShouldNeverGetCached() throws Exception { - assertAcked(prepareCreate("test") - .setSettings(ImmutableSettings.builder().put("index.cache.filter.type", "weighted")) - .addMapping("parent") - .addMapping("child", "_parent", "type=parent")); - ensureGreen(); - - client().prepareIndex("test", "parent", "1").setSource("p_field", 1).get(); - client().prepareIndex("test", "child", "2").setParent("1").setSource("c_field", 1).get(); - refresh(); - - for (int i = 0; i < 10; i++) { - SearchResponse searchResponse = client().prepareSearch("test") - .setExplain(true) - 
-                    .setQuery(constantScoreQuery(boolFilter()
-                            .must(queryFilter(hasChildQuery("child", matchQuery("c_field", 1))))
-                            .cache(true)
-                    )).get();
-            assertSearchHit(searchResponse, 1, hasId("1"));
-            // Can't start with ConstantScore(cache(BooleanFilter(
-            assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), startsWith("ConstantScore(CustomQueryWrappingFilter("));
-
-            searchResponse = client().prepareSearch("test")
-                    .setExplain(true)
-                    .setQuery(constantScoreQuery(boolFilter()
-                            .must(queryFilter(boolQuery().must(matchAllQuery()).must(hasChildQuery("child", matchQuery("c_field", 1)))))
-                            .cache(true)
-                    )).get();
-            assertSearchHit(searchResponse, 1, hasId("1"));
-            // Can't start with ConstantScore(cache(BooleanFilter(
-            assertThat(searchResponse.getHits().getAt(0).explanation().getDescription(), startsWith("ConstantScore(CustomQueryWrappingFilter("));
-        }
-    }
-
     @Test
     public void testSimpleQueryRewrite() throws Exception {
         assertAcked(prepareCreate("test")
@@ -1349,7 +1311,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
     }
 
     @Test
-    public void testHasChildNotBeingCached() throws ElasticsearchException, IOException {
+    public void testHasChildNotBeingCached() throws IOException {
         assertAcked(prepareCreate("test")
                 .addMapping("parent")
                 .addMapping("child", "_parent", "type=parent"));
@@ -1386,99 +1348,6 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
         assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
     }
 
-    @Test
-    public void testDeleteByQuery_has_child() throws Exception {
-        assertAcked(prepareCreate("test")
-                .setSettings(
-                        settingsBuilder().put(indexSettings())
-                                .put("index.refresh_interval", "-1")
-                )
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent"));
-        ensureGreen();
-
-        // index simple data
-        client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
-        client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
-        client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
-        client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
-        client().admin().indices().prepareFlush("test").get();
-        client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
-        client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
-        client().prepareIndex("test", "parent", "p3").setSource("p_field", "p_value3").get();
-        client().admin().indices().prepareFlush("test").get();
-        client().prepareIndex("test", "child", "c5").setSource("c_field", "blue").setParent("p3").get();
-        client().prepareIndex("test", "child", "c6").setSource("c_field", "red").setParent("p3").get();
-        client().admin().indices().prepareRefresh().get();
-        // p4 will not be found via search api, but will be deleted via delete_by_query api!
-        client().prepareIndex("test", "parent", "p4").setSource("p_field", "p_value4").get();
-        client().prepareIndex("test", "child", "c7").setSource("c_field", "blue").setParent("p4").get();
-        client().prepareIndex("test", "child", "c8").setSource("c_field", "red").setParent("p4").get();
-
-        SearchResponse searchResponse = client().prepareSearch("test")
-                .setQuery(randomHasChild("child", "c_field", "blue"))
-                .get();
-        assertHitCount(searchResponse, 2l);
-
-        // Delete by query doesn't support p/c queries. If the delete by query has a different execution mode
-        // that doesn't rely on IW#deleteByQuery() then this test can be changed.
-        DeleteByQueryResponse deleteByQueryResponse = client().prepareDeleteByQuery("test").setQuery(randomHasChild("child", "c_field", "blue")).get();
-        assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getSuccessful(), equalTo(0));
-        assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getFailures().length, equalTo(getNumShards("test").numPrimaries));
-        assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getFailures()[0].reason(), containsString("[has_child] query and filter unsupported in delete_by_query api"));
-        client().admin().indices().prepareRefresh("test").get();
-
-        searchResponse = client().prepareSearch("test")
-                .setQuery(randomHasChild("child", "c_field", "blue"))
-                .get();
-        assertHitCount(searchResponse, 3l);
-    }
-
-    @Test
-    public void testDeleteByQuery_has_child_SingleRefresh() throws Exception {
-        assertAcked(prepareCreate("test")
-                .setSettings(
-                        settingsBuilder()
-                                .put(indexSettings())
-                                .put("index.refresh_interval", "-1")
-                )
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent"));
-        ensureGreen();
-
-        // index simple data
-        client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
-        client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
-        client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
-        client().admin().indices().prepareFlush().get();
-        client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
-        client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
-        client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
-        client().prepareIndex("test", "parent", "p3").setSource("p_field", "p_value3").get();
-        client().prepareIndex("test", "child", "c5").setSource("c_field", "blue").setParent("p3").get();
-        client().prepareIndex("test", "child", "c6").setSource("c_field", "red").setParent("p3").get();
-        client().prepareIndex("test", "parent", "p4").setSource("p_field", "p_value4").get();
-        client().prepareIndex("test", "child", "c7").setSource("c_field", "blue").setParent("p4").get();
-        client().prepareIndex("test", "child", "c8").setSource("c_field", "red").setParent("p4").get();
-        client().admin().indices().prepareRefresh().get();
-
-        SearchResponse searchResponse = client().prepareSearch("test")
-                .setQuery(randomHasChild("child", "c_field", "blue"))
-                .get();
-        assertHitCount(searchResponse, 3l);
-
-        DeleteByQueryResponse deleteByQueryResponse = client().prepareDeleteByQuery("test").setQuery(randomHasChild("child", "c_field", "blue")).get();
-        assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getSuccessful(), equalTo(0));
-        assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getFailures().length, equalTo(getNumShards("test").numPrimaries));
-        assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getFailures()[0].reason(), containsString("[has_child] query and filter unsupported in delete_by_query api"));
-        client().admin().indices().prepareRefresh("test").get();
-
-        searchResponse = client().prepareSearch("test")
-                .setQuery(randomHasChild("child", "c_field", "blue"))
-                .get();
-        assertHitCount(searchResponse, 3l);
-    }
-
     private QueryBuilder randomHasChild(String type, String field, String value) {
         if (randomBoolean()) {
             if (randomBoolean()) {
@@ -1491,49 +1360,6 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
         }
     }
 
-    @Test
-    public void testDeleteByQuery_has_parent() throws Exception {
-        assertAcked(prepareCreate("test")
-                .setSettings(
-                        settingsBuilder()
-                                .put(indexSettings())
-                                .put("index.refresh_interval", "-1")
-                )
-                .addMapping("parent")
-                .addMapping("child", "_parent", "type=parent"));
-        ensureGreen();
-
-        // index simple data
-        client().prepareIndex("test", "parent", "p1").setSource("p_field", "p_value1").get();
-        client().prepareIndex("test", "child", "c1").setSource("c_field", "red").setParent("p1").get();
-        client().prepareIndex("test", "child", "c2").setSource("c_field", "yellow").setParent("p1").get();
-        client().prepareIndex("test", "parent", "p2").setSource("p_field", "p_value2").get();
-        client().admin().indices().prepareFlush("test").get();
-        client().prepareIndex("test", "child", "c3").setSource("c_field", "blue").setParent("p2").get();
-        client().prepareIndex("test", "child", "c4").setSource("c_field", "red").setParent("p2").get();
-        client().admin().indices().prepareRefresh().get();
-
-        SearchResponse searchResponse = client().prepareSearch("test")
-                .setQuery(randomHasParent("parent", "p_field", "p_value2"))
-                .get();
-        assertHitCount(searchResponse, 2l);
-
-        DeleteByQueryResponse deleteByQueryResponse = client().prepareDeleteByQuery("test")
-                .setQuery(randomHasParent("parent", "p_field", "p_value2"))
-                .get();
-        assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getSuccessful(), equalTo(0));
-        assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getFailures().length, equalTo(getNumShards("test").numPrimaries));
-        assertThat(deleteByQueryResponse.getIndex("test").getShardInfo().getFailures()[0].reason(), containsString("[has_parent] query and filter unsupported in delete_by_query api"));
-        client().admin().indices().prepareRefresh("test").get();
-        client().admin().indices().prepareRefresh("test").get();
-        client().admin().indices().prepareRefresh("test").get();
-
-        searchResponse = client().prepareSearch("test")
-                .setQuery(randomHasParent("parent", "p_field", "p_value2"))
-                .get();
-        assertHitCount(searchResponse, 2l);
-    }
-
     private QueryBuilder randomHasParent(String type, String field, String value) {
         if (randomBoolean()) {
             if (randomBoolean()) {
@@ -1601,7 +1427,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
     }
 
     @Test
-    public void indexChildDocWithNoParentMapping() throws ElasticsearchException, IOException {
+    public void indexChildDocWithNoParentMapping() throws IOException {
         assertAcked(prepareCreate("test")
                 .addMapping("parent")
                 .addMapping("child1"));
@@ -1611,21 +1437,21 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
         try {
             client().prepareIndex("test", "child1", "c1").setParent("p1").setSource("c_field", "blue").get();
             fail();
-        } catch (ElasticsearchIllegalArgumentException e) {
-            assertThat(e.getMessage(), equalTo("Can't specify parent if no parent field has been configured"));
+        } catch (IllegalArgumentException e) {
+            assertThat(e.toString(), containsString("Can't specify parent if no parent field has been configured"));
         }
         try {
             client().prepareIndex("test", "child2", "c2").setParent("p1").setSource("c_field", "blue").get();
             fail();
-        } catch (ElasticsearchIllegalArgumentException e) {
-            assertThat(e.getMessage(), equalTo("Can't specify parent if no parent field has been configured"));
+        } catch (IllegalArgumentException e) {
+            assertThat(e.toString(), containsString("Can't specify parent if no parent field has been configured"));
         }
         refresh();
     }
 
     @Test
-    public void testAddingParentToExistingMapping() throws ElasticsearchException, IOException {
+    public void testAddingParentToExistingMapping() throws IOException {
         createIndex("test");
         ensureGreen();
@@ -1645,7 +1471,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
                     .endObject().endObject()).get();
             fail();
         } catch (MergeMappingException e) {
-            assertThat(e.getMessage(), equalTo("Merge failed with failures {[The _parent field's type option can't be changed: [null]->[parent]]}"));
+            assertThat(e.toString(), containsString("Merge failed with failures {[The _parent field's type option can't be changed: [null]->[parent]]}"));
         }
     }
@@ -1942,8 +1768,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
         SearchResponse searchResponse = client().prepareSearch()
                 .setQuery(filteredQuery(matchAllQuery(), boolFilter()
                         .must(FilterBuilders.hasChildFilter("child", matchQuery("c_field", "red")))
-                        .must(matchAllFilter())
-                        .cache(true)))
+                        .must(matchAllFilter())))
                 .get();
         assertThat(searchResponse.getHits().totalHits(), equalTo(2l));
     }
@@ -1955,8 +1780,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
         SearchResponse searchResponse = client().prepareSearch()
                 .setQuery(filteredQuery(matchAllQuery(), boolFilter()
                         .must(FilterBuilders.hasChildFilter("child", matchQuery("c_field", "red")))
-                        .must(matchAllFilter())
-                        .cache(true)))
+                        .must(matchAllFilter())))
                 .get();
         assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
 
@@ -2007,103 +1831,6 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
         }
     }
 
-    @Test
-    public void testValidateThatHasChildAndHasParentFilterAreNeverCached() throws Exception {
-        assertAcked(prepareCreate("test")
-                .setSettings(builder().put(indexSettings())
-                        //we need 0 replicas here to make sure we always hit the very same shards
-                        .put(SETTING_NUMBER_OF_REPLICAS, 0))
-                .addMapping("child", "_parent", "type=parent"));
-        ensureGreen();
-
-        client().prepareIndex("test", "parent", "1").setSource("field", "value")
-                .get();
-        client().prepareIndex("test", "child", "1").setParent("1").setSource("field", "value")
-                .setRefresh(true)
-                .get();
-
-        SearchResponse searchResponse = client().prepareSearch("test")
-                .setQuery(hasChildQuery("child", matchAllQuery()))
-                .get();
-        assertHitCount(searchResponse, 1l);
-
-        searchResponse = client().prepareSearch("test")
-                .setQuery(hasParentQuery("parent", matchAllQuery()))
-                .get();
-        assertHitCount(searchResponse, 1l);
-
-        // Internally the has_child and has_parent use filter for the type field, which end up in the filter cache,
-        // so by first checking how much they take by executing has_child and has_parent *query* we can set a base line
-        // for the filter cache size in this test.
-        IndicesStatsResponse statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
-        long initialCacheSize = statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes();
-
-        searchResponse = client().prepareSearch("test")
-                .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.hasChildFilter("child", matchAllQuery()).cache(true)))
-                .get();
-        assertHitCount(searchResponse, 1l);
-
-        statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
-        assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(initialCacheSize));
-
-        searchResponse = client().prepareSearch("test")
-                .setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.hasParentFilter("parent", matchAllQuery()).cache(true)))
-                .get();
-        assertHitCount(searchResponse, 1l);
-
-        // filter cache should not contain any thing, b/c has_child and has_parent can't be cached.
-        statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
-        assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(initialCacheSize));
-
-        searchResponse = client().prepareSearch("test")
-                .setQuery(QueryBuilders.filteredQuery(
-                        matchAllQuery(),
-                        FilterBuilders.boolFilter().cache(true)
-                                .must(FilterBuilders.matchAllFilter())
-                                .must(FilterBuilders.hasChildFilter("child", matchAllQuery()).cache(true))
-                ))
-                .get();
-        assertHitCount(searchResponse, 1l);
-
-        searchResponse = client().prepareSearch("test")
-                .setQuery(QueryBuilders.filteredQuery(
-                        matchAllQuery(),
-                        FilterBuilders.boolFilter().cache(true)
-                                .must(FilterBuilders.matchAllFilter())
-                                .must(FilterBuilders.hasParentFilter("parent", matchAllQuery()).cache(true))
-                ))
-                .get();
-        assertHitCount(searchResponse, 1l);
-
-        // filter cache should not contain any thing, b/c has_child and has_parent can't be cached.
-        statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
-        assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), equalTo(initialCacheSize));
-
-        searchResponse = client().prepareSearch("test")
-                .setQuery(QueryBuilders.filteredQuery(
-                        matchAllQuery(),
-                        FilterBuilders.boolFilter().cache(true)
-                                .must(FilterBuilders.termFilter("field", "value").cache(true))
-                                .must(FilterBuilders.hasChildFilter("child", matchAllQuery()).cache(true))
-                ))
-                .get();
-        assertHitCount(searchResponse, 1l);
-
-        searchResponse = client().prepareSearch("test")
-                .setQuery(QueryBuilders.filteredQuery(
-                        matchAllQuery(),
-                        FilterBuilders.boolFilter().cache(true)
-                                .must(FilterBuilders.termFilter("field", "value").cache(true))
-                                .must(FilterBuilders.hasParentFilter("parent", matchAllQuery()).cache(true))
-                ))
-                .get();
-        assertHitCount(searchResponse, 1l);
-
-        // filter cache should not contain any thing, b/c has_child and has_parent can't be cached.
-        statsResponse = client().admin().indices().prepareStats("test").clear().setFilterCache(true).get();
-        assertThat(statsResponse.getIndex("test").getTotal().getFilterCache().getMemorySizeInBytes(), greaterThan(initialCacheSize));
-    }
-
     // https://github.com/elasticsearch/elasticsearch/issues/5783
     @Test
     public void testQueryBeforeChildType() throws Exception {
@@ -2332,7 +2059,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
             response = minMaxQuery("none", 3, 2, cutoff);
             fail();
         } catch (SearchPhaseExecutionException e) {
-            assertThat(e.getMessage(), containsString("[has_child] 'max_children' is less than 'min_children'"));
+            assertThat(e.toString(), containsString("[has_child] 'max_children' is less than 'min_children'"));
         }
 
         // Score mode = SUM
@@ -2412,7 +2139,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
             response = minMaxQuery("sum", 3, 2, cutoff);
             fail();
         } catch (SearchPhaseExecutionException e) {
-            assertThat(e.getMessage(), containsString("[has_child] 'max_children' is less than 'min_children'"));
+            assertThat(e.toString(), containsString("[has_child] 'max_children' is less than 'min_children'"));
         }
 
         // Score mode = MAX
@@ -2492,7 +2219,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
             response = minMaxQuery("max", 3, 2, cutoff);
             fail();
         } catch (SearchPhaseExecutionException e) {
-            assertThat(e.getMessage(), containsString("[has_child] 'max_children' is less than 'min_children'"));
+            assertThat(e.toString(), containsString("[has_child] 'max_children' is less than 'min_children'"));
         }
 
         // Score mode = AVG
@@ -2572,7 +2299,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
             response = minMaxQuery("avg", 3, 2, cutoff);
             fail();
         } catch (SearchPhaseExecutionException e) {
-            assertThat(e.getMessage(), containsString("[has_child] 'max_children' is less than 'min_children'"));
+            assertThat(e.toString(), containsString("[has_child] 'max_children' is less than 'min_children'"));
         }
 
         // HasChildFilter
@@ -2652,7 +2379,7 @@ public class SimpleChildQuerySearchTests extends ElasticsearchIntegrationTest {
             response = minMaxFilter(3, 2, cutoff);
             fail();
         } catch (SearchPhaseExecutionException e) {
-            assertThat(e.getMessage(), containsString("[has_child] 'max_children' is less than 'min_children'"));
+            assertThat(e.toString(), containsString("[has_child] 'max_children' is less than 'min_children'"));
         }
     }
diff --git a/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTest.java b/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTest.java
index 8e390c4dcfd..5bf2a8a38d6 100644
--- a/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTest.java
+++ b/src/test/java/org/elasticsearch/search/fetch/innerhits/NestedChildrenFilterTest.java
@@ -31,12 +31,12 @@ import org.apache.lucene.search.ConstantScoreQuery;
 import org.apache.lucene.search.DocIdSetIterator;
 import org.apache.lucene.search.Filter;
 import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.QueryWrapperFilter;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TotalHitCountCollector;
 import org.apache.lucene.search.join.BitDocIdSetCachingWrapperFilter;
 import org.apache.lucene.search.join.BitDocIdSetFilter;
 import org.apache.lucene.store.Directory;
-import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.search.fetch.FetchSubPhase;
 import org.elasticsearch.search.fetch.innerhits.InnerHitsContext.NestedInnerHits.NestedChildrenFilter;
 import org.elasticsearch.test.ElasticsearchTestCase;
@@ -79,8 +79,8 @@ public class NestedChildrenFilterTest extends ElasticsearchTestCase {
         IndexSearcher searcher = new IndexSearcher(reader);
         FetchSubPhase.HitContext hitContext = new FetchSubPhase.HitContext();
-        BitDocIdSetFilter parentFilter = new BitDocIdSetCachingWrapperFilter(Queries.wrap(new TermQuery(new Term("type", "parent"))));
-        Filter childFilter = Queries.wrap(new TermQuery(new Term("type", "child")));
+        BitDocIdSetFilter parentFilter = new BitDocIdSetCachingWrapperFilter(new QueryWrapperFilter(new TermQuery(new Term("type", "parent"))));
+        Filter childFilter = new QueryWrapperFilter(new TermQuery(new Term("type", "child")));
         int checkedParents = 0;
         for (LeafReaderContext leaf : reader.leaves()) {
             DocIdSetIterator parents = parentFilter.getDocIdSet(leaf).iterator();
diff --git a/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java b/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java
index 59ff93d27d8..bd6052289c6 100644
--- a/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java
+++ b/src/test/java/org/elasticsearch/search/functionscore/DecayFunctionScoreTests.java
@@ -19,15 +19,11 @@ package org.elasticsearch.search.functionscore;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
-import org.elasticsearch.ElasticsearchIllegalStateException;
-import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.action.ActionFuture;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchPhaseExecutionException;
 import org.elasticsearch.action.search.SearchResponse;
 import org.elasticsearch.action.search.SearchType;
-import org.elasticsearch.action.search.ShardSearchFailure;
 import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.lucene.search.function.CombineFunction;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -469,7 +465,7 @@ public class DecayFunctionScoreTests extends ElasticsearchIntegrationTest {
     }
 
-    @Test(expected = ElasticsearchIllegalStateException.class)
+    @Test(expected = IllegalStateException.class)
     public void testExceptionThrownIfScaleRefNotBetween0And1() throws Exception {
         DecayFunctionBuilder gfb = new GaussDecayFunctionBuilder("num1", "2013-05-28", "1d").setDecay(100);
     }
 
@@ -874,8 +870,8 @@ public class DecayFunctionScoreTests extends ElasticsearchIntegrationTest {
                     searchSource().query(query))).actionGet();
             fail("Should fail with SearchPhaseExecutionException");
         } catch (SearchPhaseExecutionException failure) {
-            assertTrue(failure.getMessage().contains("SearchParseException"));
-            assertFalse(failure.getMessage().contains("NullPointerException"));
+            assertTrue(failure.toString().contains("SearchParseException"));
+            assertFalse(failure.toString().contains("NullPointerException"));
         }
 
         query = "{\n" +
@@ -908,26 +904,26 @@ public class DecayFunctionScoreTests extends ElasticsearchIntegrationTest {
                     searchSource().query(query))).actionGet();
             fail("Should fail with SearchPhaseExecutionException");
         } catch (SearchPhaseExecutionException failure) {
-            assertTrue(failure.getMessage().contains("SearchParseException"));
-            assertFalse(failure.getMessage().contains("NullPointerException"));
-            assertTrue(failure.getMessage().contains("One entry in functions list is missing a function"));
+            assertTrue(failure.toString().contains("SearchParseException"));
+            assertFalse(failure.toString().contains("NullPointerException"));
+            assertTrue(failure.toString().contains("One entry in functions list is missing a function"));
         }
 
         // next test java client
         try {
             client().prepareSearch("t").setQuery(QueryBuilders.functionScoreQuery(FilterBuilders.matchAllFilter(), null)).get();
-        } catch (ElasticsearchIllegalArgumentException failure) {
-            assertTrue(failure.getMessage().contains("function must not be null"));
+        } catch (IllegalArgumentException failure) {
+            assertTrue(failure.toString().contains("function must not be null"));
         }
         try {
             client().prepareSearch("t").setQuery(QueryBuilders.functionScoreQuery().add(FilterBuilders.matchAllFilter(), null)).get();
-        } catch (ElasticsearchIllegalArgumentException failure) {
-            assertTrue(failure.getMessage().contains("function must not be null"));
+        } catch (IllegalArgumentException failure) {
+            assertTrue(failure.toString().contains("function must not be null"));
         }
         try {
             client().prepareSearch("t").setQuery(QueryBuilders.functionScoreQuery().add(null)).get();
-        } catch (ElasticsearchIllegalArgumentException failure) {
-            assertTrue(failure.getMessage().contains("function must not be null"));
+        } catch (IllegalArgumentException failure) {
+            assertTrue(failure.toString().contains("function must not be null"));
         }
     }
diff --git a/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptTests.java b/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptTests.java
index 60f80c62717..16e4c09d8be 100644
--- a/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptTests.java
+++ b/src/test/java/org/elasticsearch/search/functionscore/ExplainableScriptTests.java
@@ -108,11 +108,8 @@ public class ExplainableScriptTests extends ElasticsearchIntegrationTest {
 
         @Override
         public Explanation explain(Explanation subQueryScore) throws IOException {
-            Explanation exp = new Explanation((float) (runAsDouble()), "This script returned " + runAsDouble());
-            Explanation scoreExp = new Explanation(subQueryScore.getValue(), "_score: ");
-            scoreExp.addDetail(subQueryScore);
-            exp.addDetail(scoreExp);
-            return exp;
+            Explanation scoreExp = Explanation.match(subQueryScore.getValue(), "_score: ", subQueryScore);
+            return Explanation.match((float) (runAsDouble()), "This script returned " + runAsDouble(), scoreExp);
         }
 
         @Override
diff --git a/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueTests.java b/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueTests.java
index 0d4d91b1f92..eef4ed27959 100644
--- a/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueTests.java
+++ b/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreFieldValueTests.java
@@ -88,6 +88,14 @@ public class FunctionScoreFieldValueTests extends ElasticsearchIntegrationTest {
             // We are expecting an exception, because 3 has no field
         }
 
+        // doc 3 doesn't have a "test" field but we're defaulting it to 100 so it should be last
+        response = client().prepareSearch("test")
+                .setExplain(randomBoolean())
+                .setQuery(functionScoreQuery(matchAllQuery(),
+                        fieldValueFactorFunction("test").modifier(FieldValueFactorFunction.Modifier.RECIPROCAL).missing(100)))
+                .get();
+        assertOrderedSearchHits(response, "1", "2", "3");
+
         // n divided by 0 is infinity, which should provoke an exception.
         try {
             response = client().prepareSearch("test")
diff --git a/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java b/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java
index 15b82c20dbb..3224a5b3630 100644
--- a/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java
+++ b/src/test/java/org/elasticsearch/search/functionscore/FunctionScorePluginTests.java
@@ -19,7 +19,6 @@ package org.elasticsearch.search.functionscore;
 
-import org.apache.lucene.search.ComplexExplanation;
 import org.apache.lucene.search.Explanation;
 import org.elasticsearch.action.ActionFuture;
 import org.elasticsearch.action.search.SearchResponse;
@@ -33,6 +32,8 @@ import org.elasticsearch.index.query.functionscore.FunctionScoreModule;
 import org.elasticsearch.plugins.AbstractPlugin;
 import org.elasticsearch.search.SearchHits;
 import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
+import org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
 import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;
 import org.junit.Test;
@@ -43,8 +44,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
 import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery;
 import static org.elasticsearch.index.query.QueryBuilders.termQuery;
 import static org.elasticsearch.search.builder.SearchSourceBuilder.searchSource;
-import static org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
-import static org.elasticsearch.test.ElasticsearchIntegrationTest.Scope;
 import static org.hamcrest.Matchers.equalTo;
 
 /**
@@ -142,9 +141,7 @@ public class FunctionScorePluginTests extends ElasticsearchIntegrationTest {
 
         @Override
         public Explanation explainFunction(String distanceString, double distanceVal, double scale) {
-            ComplexExplanation ce = new ComplexExplanation();
-            ce.setDescription("" + distanceVal);
-            return ce;
+            return Explanation.match((float) distanceVal, "" + distanceVal);
         }
 
         @Override
diff --git a/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java b/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java
index 5c7859a57d8..13f95320993 100644
--- a/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java
+++ b/src/test/java/org/elasticsearch/search/functionscore/FunctionScoreTests.java
@@ -172,23 +172,23 @@ public class FunctionScoreTests extends ElasticsearchIntegrationTest {
         SearchResponse responseWithWeights = client().search(
                 searchRequest().source(
                         searchSource().query(
-                                functionScoreQuery(termFilter(TEXT_FIELD, "value").cache(false))
+                                functionScoreQuery(termFilter(TEXT_FIELD, "value"))
                                         .add(gaussDecayFunction(GEO_POINT_FIELD, new GeoPoint(10, 20), "1000km"))
                                         .add(fieldValueFactorFunction(DOUBLE_FIELD).modifier(FieldValueFactorFunction.Modifier.LN).setWeight(2))
                                         .add(scriptFunction("_index['" + TEXT_FIELD + "']['value'].tf()").setWeight(3))
                         ).explain(true))).actionGet();
 
         assertThat(responseWithWeights.getHits().getAt(0).getExplanation().toString(),
-                equalTo("6.0 = (MATCH) function score, product of:\n 1.0 = (MATCH) ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 6.0 = (MATCH) Math.min of\n 6.0 = (MATCH) function score, score mode [multiply]\n 1.0 = (MATCH) function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 1.0 = (MATCH) Function for field geo_point_field:\n 1.0 = exp(-0.5*pow(MIN of: [Math.max(arcDistance([10.0, 20.0](=doc value),[10.0, 20.0](=origin)) - 0.0(=offset), 0)],2.0)/7.213475204444817E11)\n 2.0 = (MATCH) function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 2.0 = (MATCH) product of:\n 1.0 = field value function: ln(doc['double_field'].value * factor=1.0)\n 2.0 = weight\n 3.0 = (MATCH) function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 3.0 = (MATCH) product of:\n 1.0 = script score function, computed with script:\"_index['text_field']['value'].tf()\n 1.0 = _score: \n 1.0 = (MATCH) ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 3.0 = weight\n 3.4028235E38 = maxBoost\n 1.0 = queryBoost\n")
+                equalTo("6.0 = function score, product of:\n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 6.0 = min of:\n 6.0 = function score, score mode [multiply]\n 1.0 = function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 1.0 = Function for field geo_point_field:\n 1.0 = exp(-0.5*pow(MIN of: [Math.max(arcDistance([10.0, 20.0](=doc value),[10.0, 20.0](=origin)) - 0.0(=offset), 0)],2.0)/7.213475204444817E11)\n 2.0 = function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 2.0 = product of:\n 1.0 = field value function: ln(doc['double_field'].value * factor=1.0)\n 2.0 = weight\n 3.0 = function score, product of:\n 1.0 = match filter: QueryWrapperFilter(*:*)\n 3.0 = product of:\n 1.0 = script score function, computed with script:\"_index['text_field']['value'].tf()\n 1.0 = _score: \n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 3.0 = weight\n 3.4028235E38 = maxBoost\n 1.0 = queryBoost\n")
         );
 
         responseWithWeights = client().search(
                 searchRequest().source(
                         searchSource().query(
-                                functionScoreQuery(termFilter(TEXT_FIELD, "value").cache(false))
+                                functionScoreQuery(termFilter(TEXT_FIELD, "value"))
                                         .add(weightFactorFunction(4.0f))
                         ).explain(true))).actionGet();
 
         assertThat(responseWithWeights.getHits().getAt(0).getExplanation().toString(),
-                equalTo("4.0 = (MATCH) function score, product of:\n 1.0 = (MATCH) ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 4.0 = (MATCH) Math.min of\n 4.0 = (MATCH) product of:\n 1.0 = constant score 1.0 - no function provided\n 4.0 = weight\n 3.4028235E38 = maxBoost\n 1.0 = queryBoost\n")
+                equalTo("4.0 = function score, product of:\n 1.0 = ConstantScore(text_field:value), product of:\n 1.0 = boost\n 1.0 = queryNorm\n 4.0 = min of:\n 4.0 = product of:\n 1.0 = constant score 1.0 - no function provided\n 4.0 = weight\n 3.4028235E38 = maxBoost\n 1.0 = queryBoost\n")
         );
     }
diff --git a/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java b/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java
index bf76e8c3b5a..4b7eeadf59e 100644
--- a/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java
+++ b/src/test/java/org/elasticsearch/search/geo/GeoFilterTests.java
@@ -28,7 +28,6 @@ import org.apache.lucene.spatial.prefix.tree.GeohashPrefixTree;
 import org.apache.lucene.spatial.query.SpatialArgs;
 import org.apache.lucene.spatial.query.SpatialOperation;
 import org.apache.lucene.spatial.query.UnsupportedSpatialOperation;
-import org.apache.lucene.util.LuceneTestCase.Slow;
 import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
 import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkResponse;
@@ -498,7 +497,6 @@ public class GeoFilterTests extends ElasticsearchIntegrationTest {
         Map<GeohashCellFilter.Builder, Long> expectedCounts = new HashMap<>();
         Map expectedResults = new HashMap<>();
-        Map<GeohashCellFilter.Builder, String> cacheKeys = new HashMap<>();
 
         expectedCounts.put(geoHashCellFilter("pin", geohash, false), 1L);
@@ -516,19 +514,6 @@ public class GeoFilterTests extends ElasticsearchIntegrationTest {
         for (int j = filterBuilders.size() * 2 * randomIntBetween(1, 5); j > 0; j--) {
             Collections.shuffle(filterBuilders, getRandom());
             for (GeohashCellFilter.Builder builder : filterBuilders) {
-                if (randomBoolean()) {
-                    builder.cache(randomBoolean());
-                }
-                if (randomBoolean()) {
-                    String cacheKey = cacheKeys.get(builder);
-                    if (cacheKey == null) {
-                        cacheKey = randomUnicodeOfLength(6);
-                        cacheKeys.put(builder, cacheKey);
-                    }
-                    builder.cacheKey(cacheKey);
-                } else {
-                    builder.cacheKey(null);
-                }
                 try {
                     long expectedCount = expectedCounts.get(builder);
                     SearchResponse response = client().prepareSearch("locations").setQuery(QueryBuilders.matchAllQuery())
diff --git a/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java b/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java
index d663b95e92b..4cec4c431f6 100644
--- a/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java
+++ b/src/test/java/org/elasticsearch/search/geo/GeoShapeIntegrationTests.java
@@ -346,47 +346,6 @@ public class GeoShapeIntegrationTests extends ElasticsearchIntegrationTest {
         assertHitCount(result, 1);
     }
 
-    @Test // Issue 2944
-    public void testThatShapeIsReturnedEvenWhenExclusionsAreSet() throws Exception {
-        String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
-                .startObject("properties").startObject("location")
-                .field("type", "geo_shape")
-                .endObject().endObject()
-                .startObject("_source")
-                .startArray("excludes").value("nonExistingField").endArray()
-                .endObject()
-                .endObject().endObject()
-                .string();
-        assertAcked(prepareCreate("test").addMapping("type1", mapping));
-        ensureGreen();
-
-        indexRandom(true,
-                client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject()
-                        .field("name", "Document 1")
-                        .startObject("location")
-                        .field("type", "envelope")
-                        .startArray("coordinates").startArray().value(-45.0).value(45).endArray().startArray().value(45).value(-45).endArray().endArray()
-                        .endObject()
-                        .endObject()));
-
-        SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.matchAllQuery()).execute().actionGet();
-        assertThat(searchResponse.getHits().totalHits(), equalTo(1L));
-
-        Map<String, Object> indexedMap = searchResponse.getHits().getAt(0).sourceAsMap();
-        assertThat(indexedMap.get("location"), instanceOf(Map.class));
-        Map<String, Object> locationMap = (Map<String, Object>) indexedMap.get("location");
-        assertThat(locationMap.get("coordinates"), instanceOf(List.class));
-        List<List<Number>> coordinates = (List<List<Number>>) locationMap.get("coordinates");
-        assertThat(coordinates.size(), equalTo(2));
-        assertThat(coordinates.get(0).size(), equalTo(2));
-        assertThat(coordinates.get(0).get(0).doubleValue(), equalTo(-45.0));
-        assertThat(coordinates.get(0).get(1).doubleValue(), equalTo(45.0));
-        assertThat(coordinates.get(1).size(), equalTo(2));
-        assertThat(coordinates.get(1).get(0).doubleValue(), equalTo(45.0));
-        assertThat(coordinates.get(1).get(1).doubleValue(), equalTo(-45.0));
-        assertThat(locationMap.size(), equalTo(2));
-    }
-
     @LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elasticsearch/elasticsearch/issues/9904")
     @Test
     public void testShapeFilterWithRandomGeoCollection() throws Exception {
diff --git a/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java b/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java
index 9de55b2a304..b67a5c07494 100644
--- a/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java
+++ b/src/test/java/org/elasticsearch/search/highlight/HighlighterSearchTests.java
@@ -98,7 +98,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
 
     @Test
     // see #3486
-    public void testHighTermFrequencyDoc() throws ElasticsearchException, IOException {
+    public void testHighTermFrequencyDoc() throws IOException {
         assertAcked(prepareCreate("test")
                 .addMapping("test", "name", "type=string,term_vector=with_positions_offsets,store=" + (randomBoolean() ? "yes" : "no")));
         ensureYellow();
@@ -115,7 +115,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
     }
 
     @Test
-    public void testNgramHighlightingWithBrokenPositions() throws ElasticsearchException, IOException {
+    public void testNgramHighlightingWithBrokenPositions() throws IOException {
         assertAcked(prepareCreate("test")
                 .addMapping("test", jsonBuilder()
                         .startObject()
@@ -171,7 +171,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
     }
 
     @Test
-    public void testMultiPhraseCutoff() throws ElasticsearchException, IOException {
+    public void testMultiPhraseCutoff() throws IOException {
         /*
         * MultiPhraseQuery can literally kill an entire node if there are too many terms in the
         * query. We cut off and extract terms if there are more than 16 terms in the query
@@ -203,7 +203,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
     }
 
     @Test
-    public void testNgramHighlightingPreLucene42() throws ElasticsearchException, IOException {
+    public void testNgramHighlightingPreLucene42() throws IOException {
 
         assertAcked(prepareCreate("test")
                 .addMapping("test",
@@ -271,7 +271,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
     }
 
     @Test
-    public void testNgramHighlighting() throws ElasticsearchException, IOException {
+    public void testNgramHighlighting() throws IOException {
         assertAcked(prepareCreate("test")
                 .addMapping("test",
                         "name", "type=string,analyzer=name_index_analyzer,search_analyzer=name_search_analyzer,term_vector=with_positions_offsets",
@@ -1301,7 +1301,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
     }
 
     @Test
-    public void testBoostingQueryTermVector() throws ElasticsearchException, IOException {
+    public void testBoostingQueryTermVector() throws IOException {
         assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
         ensureGreen();
         client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog")
@@ -1338,7 +1338,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
     }
 
     @Test
-    public void testCommonTermsTermVector() throws ElasticsearchException, IOException {
+    public void testCommonTermsTermVector() throws IOException {
         assertAcked(prepareCreate("test").addMapping("type1", type1TermVectorMapping()));
         ensureGreen();
 
@@ -1354,7 +1354,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
     }
 
     @Test
-    public void testPhrasePrefix() throws ElasticsearchException, IOException {
+    public void testPhrasePrefix() throws IOException {
         Builder builder = settingsBuilder()
                 .put(indexSettings())
                 .put("index.analysis.analyzer.synonym.tokenizer", "whitespace")
@@ -2289,7 +2289,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
     }
 
     @Test
-    public void testPostingsHighlighterBoostingQuery() throws ElasticsearchException, IOException {
+    public void testPostingsHighlighterBoostingQuery() throws IOException {
         assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
         ensureGreen();
         client().prepareIndex("test", "type1").setSource("field1", "this is a test", "field2", "The quick brown fox jumps over the lazy dog! Second sentence.")
@@ -2306,7 +2306,7 @@ public class HighlighterSearchTests extends ElasticsearchIntegrationTest {
     }
 
     @Test
-    public void testPostingsHighlighterCommonTermsQuery() throws ElasticsearchException, IOException {
+    public void testPostingsHighlighterCommonTermsQuery() throws IOException {
         assertAcked(prepareCreate("test").addMapping("type1", type1PostingsffsetsMapping()));
         ensureGreen();
 
diff --git a/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java b/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java
index 7300331cab2..8792b2cb6a8 100644
--- a/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java
+++ b/src/test/java/org/elasticsearch/search/innerhits/InnerHitsTests.java
@@ -19,9 +19,11 @@ package org.elasticsearch.search.innerhits;
 
+import org.elasticsearch.Version;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.index.query.BoolQueryBuilder;
 import org.elasticsearch.index.query.support.QueryInnerHitBuilder;
@@ -161,7 +163,7 @@ public class InnerHitsTests extends ElasticsearchIntegrationTest {
             assertThat(innerHits.getTotalHits(), equalTo(2l));
             assertThat(innerHits.getHits().length, equalTo(1));
             assertThat(innerHits.getAt(0).getHighlightFields().get("comments.message").getFragments()[0].string(), equalTo("fox eat quick"));
-            assertThat(innerHits.getAt(0).explanation().toString(), containsString("(MATCH) weight(comments.message:fox in"));
+            assertThat(innerHits.getAt(0).explanation().toString(), containsString("weight(comments.message:fox in"));
             assertThat(innerHits.getAt(0).getFields().get("comments.message").getValue().toString(), equalTo("eat"));
             assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("eat"));
         }
@@ -338,7 +340,7 @@ public class InnerHitsTests extends ElasticsearchIntegrationTest {
             SearchHits innerHits = response.getHits().getAt(0).getInnerHits().get("comment");
             assertThat(innerHits.getHits().length, equalTo(1));
             assertThat(innerHits.getAt(0).getHighlightFields().get("message").getFragments()[0].string(), equalTo("fox eat quick"));
-            assertThat(innerHits.getAt(0).explanation().toString(), containsString("(MATCH) weight(message:fox"));
+            assertThat(innerHits.getAt(0).explanation().toString(), containsString("weight(message:fox"));
             assertThat(innerHits.getAt(0).getFields().get("message").getValue().toString(), equalTo("eat"));
             assertThat(innerHits.getAt(0).getFields().get("script").getValue().toString(), equalTo("eat"));
         }
@@ -772,7 +774,7 @@ public class InnerHitsTests extends ElasticsearchIntegrationTest {
 
     @Test
     public void testNestedInnerHitsWithExcludeSource() throws Exception {
-        assertAcked(prepareCreate("articles")
+        assertAcked(prepareCreate("articles").setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id)
.addMapping("article", jsonBuilder().startObject() .startObject("_source").field("excludes", new String[]{"comments"}).endObject() .startObject("properties") @@ -810,7 +812,7 @@ public class InnerHitsTests extends ElasticsearchIntegrationTest { @Test public void testNestedInnerHitsHiglightWithExcludeSource() throws Exception { - assertAcked(prepareCreate("articles") + assertAcked(prepareCreate("articles").setSettings(IndexMetaData.SETTING_VERSION_CREATED, Version.V_1_4_2.id) .addMapping("article", jsonBuilder().startObject() .startObject("_source").field("excludes", new String[]{"comments"}).endObject() .startObject("properties") @@ -865,7 +867,12 @@ public class InnerHitsTests extends ElasticsearchIntegrationTest { List requests = new ArrayList<>(); requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject() .field("title", "quick brown fox") - .startObject("comments").startObject("messages").field("message", "fox eat quick").endObject().endObject() + .startObject("comments") + .startArray("messages") + .startObject().field("message", "fox eat quick").endObject() + .startObject().field("message", "bear eat quick").endObject() + .endArray() + .endObject() .endObject())); indexRandom(true, requests); @@ -877,11 +884,40 @@ public class InnerHitsTests extends ElasticsearchIntegrationTest { assertThat(response.getHits().getAt(0).id(), equalTo("1")); assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getTotalHits(), equalTo(1l)); assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).id(), equalTo("1")); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getField().string(), equalTo("comments")); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getOffset(), equalTo(0)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getChild().getField().string(), equalTo("messages")); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getChild().getOffset(), equalTo(0)); - assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getChild().getChild(), nullValue()); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getChild(), nullValue()); + + response = client().prepareSearch("articles") + .setQuery(nestedQuery("comments.messages", matchQuery("comments.messages.message", "bear")).innerHit(new QueryInnerHitBuilder())) + .get(); + assertNoFailures(response); + assertHitCount(response, 1); + assertThat(response.getHits().getAt(0).id(), equalTo("1")); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getTotalHits(), equalTo(1l)); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).id(), equalTo("1")); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages")); + assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getOffset(), equalTo(1)); + 
+        assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getChild(), nullValue());
+
+        // index the message in an object form instead of an array
+        requests = new ArrayList<>();
+        requests.add(client().prepareIndex("articles", "article", "1").setSource(jsonBuilder().startObject()
+                .field("title", "quick brown fox")
+                .startObject("comments").startObject("messages").field("message", "fox eat quick").endObject().endObject()
+                .endObject()));
+        indexRandom(true, requests);
+
+        response = client().prepareSearch("articles")
+                .setQuery(nestedQuery("comments.messages", matchQuery("comments.messages.message", "fox")).innerHit(new QueryInnerHitBuilder()))
+                .get();
+        assertNoFailures(response);
+        assertHitCount(response, 1);
+        assertThat(response.getHits().getAt(0).id(), equalTo("1"));
+        assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getTotalHits(), equalTo(1l));
+        assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).id(), equalTo("1"));
+        assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getField().string(), equalTo("comments.messages"));
+        assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getOffset(), equalTo(0));
+        assertThat(response.getHits().getAt(0).getInnerHits().get("comments.messages").getAt(0).getNestedIdentity().getChild(), nullValue());
     }
 }
diff --git a/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java b/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java
index 98478e67b24..85e20e7fcbc 100644
--- a/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java
+++ b/src/test/java/org/elasticsearch/search/preference/SearchPreferenceTests.java
@@ -19,10 +19,8 @@ package org.elasticsearch.search.preference;
 
-import org.elasticsearch.ElasticsearchIllegalArgumentException;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.test.ElasticsearchIntegrationTest;
@@ -36,6 +34,7 @@ import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.*;
 
+@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2)
 public class SearchPreferenceTests extends ElasticsearchIntegrationTest {
 
     @Test // see #2896
@@ -111,7 +110,7 @@ public class SearchPreferenceTests extends ElasticsearchIntegrationTest {
         assertThat(searchResponse.getHits().totalHits(), equalTo(1l));
     }
 
-    @Test (expected = ElasticsearchIllegalArgumentException.class)
+    @Test (expected = IllegalArgumentException.class)
     public void testThatSpecifyingNonExistingNodesReturnsUsefulError() throws Exception {
         createIndex("test");
         ensureGreen();
diff --git a/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java b/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java
index d0a5a6b357b..e72cad5dfdd 100644
--- a/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java
+++ b/src/test/java/org/elasticsearch/search/query/SearchQueryTests.java
@@ -483,7 +483,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest {
             client().prepareSearch().setQuery(matchQuery("field1", "quick brown").type(MatchQueryBuilder.Type.PHRASE).slop(0)).get();
             fail("SearchPhaseExecutionException should have been thrown");
         } catch (SearchPhaseExecutionException e) {
-            assertTrue(e.getMessage().contains("IllegalStateException[field \"field1\" was indexed without position data; cannot run PhraseQuery"));
+            assertTrue(e.toString().contains("IllegalStateException[field \"field1\" was indexed without position data; cannot run PhraseQuery"));
         }
             cluster().wipeIndices("test");
         } catch (MapperParsingException ex) {
@@ -563,7 +563,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest {
             fail("expected SearchPhaseExecutionException (total failure)");
         } catch (SearchPhaseExecutionException e) {
             assertThat(e.status(), equalTo(RestStatus.BAD_REQUEST));
-            assertThat(e.getMessage(), containsString("unit [D] not supported for date math"));
+            assertThat(e.toString(), containsString("unit [D] not supported for date math"));
         }
     }
 
@@ -587,6 +587,44 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest {
         assertHitCount(searchResponse, 1l);
     }
 
+    @Test // https://github.com/elasticsearch/elasticsearch/issues/10477
+    public void testDateRangeInQueryStringWithTimeZone_10477() {
+        //the mapping needs to be provided upfront otherwise we are not sure how many failures we get back
+        //as with dynamic mappings some shards might be lacking behind and parse a different query
+        assertAcked(prepareCreate("test").addMapping(
+                "type", "past", "type=date"
+        ));
+        ensureGreen();
+
+        client().prepareIndex("test", "type", "1").setSource("past", "2015-04-05T23:00:00+0000").get();
+        client().prepareIndex("test", "type", "2").setSource("past", "2015-04-06T00:00:00+0000").get();
+        refresh();
+
+        // Timezone set with dates
+        SearchResponse searchResponse = client().prepareSearch()
+                .setQuery(queryStringQuery("past:[2015-04-06T00:00:00+0200 TO 2015-04-06T23:00:00+0200]"))
+                .get();
+        assertHitCount(searchResponse, 2l);
+
+        // Same timezone set with time_zone
+        searchResponse = client().prepareSearch()
+                .setQuery(queryStringQuery("past:[2015-04-06T00:00:00 TO 2015-04-06T23:00:00]").timeZone("+0200"))
+                .get();
+        assertHitCount(searchResponse, 2l);
+
+        // We set a timezone which will give no result
+        searchResponse = client().prepareSearch()
+                .setQuery(queryStringQuery("past:[2015-04-06T00:00:00-0200 TO 2015-04-06T23:00:00-0200]"))
+                .get();
+        assertHitCount(searchResponse, 0l);
+
+        // Same timezone set with time_zone but another timezone is set directly within dates which has the precedence
+        searchResponse = client().prepareSearch()
+                .setQuery(queryStringQuery("past:[2015-04-06T00:00:00-0200 TO 2015-04-06T23:00:00-0200]").timeZone("+0200"))
+                .get();
+        assertHitCount(searchResponse, 0l);
+    }
+
     @Test
     public void typeFilterTypeIndexedTests() throws Exception {
         typeFilterTests("not_analyzed");
@@ -770,10 +808,10 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest {
         ensureGreen();
         client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();
         refresh();
 
-        SearchResponse searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get();
+        SearchResponse searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get();
         assertHitCount(searchResponse, 1l);
 
-        searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1").cacheKey("test1"))).get();
+        searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get();
searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get(); assertHitCount(searchResponse, 1l); searchResponse = client().prepareSearch("test").setQuery(constantScoreQuery(termsFilter("field1", "value1"))).get(); @@ -1460,7 +1498,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { } @Test // see #2926 - public void testMustNot() throws ElasticsearchException, IOException, ExecutionException, InterruptedException { + public void testMustNot() throws IOException, ExecutionException, InterruptedException { assertAcked(prepareCreate("test") //issue manifested only with shards>=2 .setSettings(SETTING_NUMBER_OF_SHARDS, between(2, DEFAULT_MAX_NUM_SHARDS))); @@ -1483,7 +1521,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { } @Test // see #2994 - public void testSimpleSpan() throws ElasticsearchException, IOException, ExecutionException, InterruptedException { + public void testSimpleSpan() throws IOException, ExecutionException, InterruptedException { createIndex("test"); ensureGreen(); @@ -1505,7 +1543,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { } @Test - public void testSpanMultiTermQuery() throws ElasticsearchException, IOException { + public void testSpanMultiTermQuery() throws IOException { createIndex("test"); ensureGreen(); @@ -1538,7 +1576,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { } @Test - public void testSpanNot() throws ElasticsearchException, IOException, ExecutionException, InterruptedException { + public void testSpanNot() throws IOException, ExecutionException, InterruptedException { createIndex("test"); ensureGreen(); @@ -1577,7 +1615,7 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { } @Test - public void testSimpleDFSQuery() throws ElasticsearchException, IOException { + public void testSimpleDFSQuery() throws IOException { assertAcked(prepareCreate("test") .addMapping("s", jsonBuilder() .startObject() @@ -2493,8 +2531,8 @@ public class SearchQueryTests extends ElasticsearchIntegrationTest { .get(); fail("query is invalid and should have produced a parse exception"); } catch (Exception e) { - assertThat("query could not be parsed due to bad format: " + e.getMessage(), - e.getMessage().contains("Illegal value for id, expecting a string or number, got: START_ARRAY"), + assertThat("query could not be parsed due to bad format: " + e.toString(), + e.toString().contains("Illegal value for id, expecting a string or number, got: START_ARRAY"), equalTo(true)); } } diff --git a/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java b/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java index 6b199d9970a..286e7f72af8 100644 --- a/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java +++ b/src/test/java/org/elasticsearch/search/scriptfilter/ScriptFilterSearchTests.java @@ -20,13 +20,11 @@ package org.elasticsearch.search.scriptfilter; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.cache.filter.FilterCacheModule; import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; -import 
org.elasticsearch.script.groovy.GroovyScriptEngineService; +import org.elasticsearch.index.cache.filter.index.IndexFilterCache; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -37,8 +35,6 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; import static org.elasticsearch.index.query.FilterBuilders.scriptFilter; import static org.elasticsearch.index.query.QueryBuilders.filteredQuery; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; -import static org.elasticsearch.index.query.QueryBuilders.termQuery; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; /** @@ -50,9 +46,8 @@ public class ScriptFilterSearchTests extends ElasticsearchIntegrationTest { @Override protected Settings nodeSettings(int nodeOrdinal) { return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)) - .put(GroovyScriptEngineService.GROOVY_SCRIPT_SANDBOX_ENABLED, false) // aggressive filter caching so that we can assert on the number of iterations of the script filters - .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, WeightedFilterCache.class) + .put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, IndexFilterCache.class) .put(FilterCacheSettings.FILTER_CACHE_EVERYTHING, true) .build(); } @@ -118,57 +113,4 @@ public class ScriptFilterSearchTests extends ElasticsearchIntegrationTest { public static int incrementScriptCounter() { return scriptCounter.incrementAndGet(); } - - @Test - public void testCustomScriptCache() throws Exception { - assertAcked(prepareCreate("test").setSettings( - ImmutableSettings.settingsBuilder() - //needs to run without replicas to validate caching behaviour and make sure we always hit the very shame shard - .put(indexSettings()) - .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))); - client().prepareIndex("test", "type1", "1").setSource(jsonBuilder().startObject().field("test", "1").field("num", 1.0f).endObject()).execute().actionGet(); - flush(); - client().prepareIndex("test", "type1", "2").setSource(jsonBuilder().startObject().field("test", "2").field("num", 2.0f).endObject()).execute().actionGet(); - flush(); - client().prepareIndex("test", "type1", "3").setSource(jsonBuilder().startObject().field("test", "3").field("num", 3.0f).endObject()).execute().actionGet(); - flushAndRefresh(); - - String script = "org.elasticsearch.search.scriptfilter.ScriptFilterSearchTests.incrementScriptCounter() > 0"; - - scriptCounter.set(0); - logger.info("running script filter the first time"); - SearchResponse response = client().prepareSearch() - .setQuery(filteredQuery(termQuery("test", "1"), scriptFilter(script).cache(true))) - .execute().actionGet(); - - assertThat(response.getHits().totalHits(), equalTo(1l)); - assertThat(scriptCounter.get(), equalTo(3)); - - scriptCounter.set(0); - logger.info("running script filter the second time"); - response = client().prepareSearch() - .setQuery(filteredQuery(termQuery("test", "2"), scriptFilter(script).cache(true))) - .execute().actionGet(); - - assertThat(response.getHits().totalHits(), equalTo(1l)); - assertThat(scriptCounter.get(), equalTo(0)); - - scriptCounter.set(0); - logger.info("running script filter with new parameters"); - response = client().prepareSearch() - .setQuery(filteredQuery(termQuery("test", "1"), scriptFilter(script).addParam("param1", "1").cache(true))) - .execute().actionGet(); - - 
assertThat(response.getHits().totalHits(), equalTo(1l)); - assertThat(scriptCounter.get(), equalTo(3)); - - scriptCounter.set(0); - logger.info("running script filter with same parameters"); - response = client().prepareSearch() - .setQuery(filteredQuery(matchAllQuery(), scriptFilter(script).addParam("param1", "1").cache(true))) - .execute().actionGet(); - - assertThat(response.getHits().totalHits(), equalTo(3l)); - assertThat(scriptCounter.get(), equalTo(0)); - } -} \ No newline at end of file +} diff --git a/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java b/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java index d23d4da71ac..21dd1f3a1f8 100644 --- a/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java +++ b/src/test/java/org/elasticsearch/search/scroll/SearchScrollTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.search.scroll; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.search.*; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Priority; @@ -319,13 +318,13 @@ public class SearchScrollTests extends ElasticsearchIntegrationTest { try { client().prepareClearScroll().addScrollId("c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1").get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } try { // Fails during base64 decoding (Base64-encoded string must have at least four characters) client().prepareClearScroll().addScrollId("a").get(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } try { client().prepareClearScroll().addScrollId("abcabc").get(); @@ -513,7 +512,7 @@ public class SearchScrollTests extends ElasticsearchIntegrationTest { RestSearchScrollAction.buildFromContent(invalidContent, searchScrollRequest); fail("expected parseContent failure"); } catch (Exception e) { - assertThat(e, instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e, instanceOf(IllegalArgumentException.class)); assertThat(e.getMessage(), equalTo("Failed to parse request body")); } } @@ -530,7 +529,7 @@ public class SearchScrollTests extends ElasticsearchIntegrationTest { RestSearchScrollAction.buildFromContent(invalidContent, searchScrollRequest); fail("expected parseContent failure"); } catch (Exception e) { - assertThat(e, instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e, instanceOf(IllegalArgumentException.class)); assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]")); } } @@ -555,7 +554,7 @@ public class SearchScrollTests extends ElasticsearchIntegrationTest { RestClearScrollAction.buildFromContent(invalidContent, clearScrollRequest); fail("expected parseContent failure"); } catch (Exception e) { - assertThat(e, instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e, instanceOf(IllegalArgumentException.class)); assertThat(e.getMessage(), equalTo("Failed to parse request body")); } } @@ -572,7 +571,7 @@ public class SearchScrollTests extends ElasticsearchIntegrationTest { RestClearScrollAction.buildFromContent(invalidContent, clearScrollRequest); fail("expected parseContent failure"); } catch (Exception e) { - assertThat(e, instanceOf(ElasticsearchIllegalArgumentException.class)); + assertThat(e, instanceOf(IllegalArgumentException.class)); assertThat(e.getMessage(), startsWith("Unknown parameter [unknown]")); } } diff --git 
a/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java b/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java index 2e3276107fa..f95a1422786 100644 --- a/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java +++ b/src/test/java/org/elasticsearch/search/simple/SimpleSearchTests.java @@ -19,15 +19,12 @@ package org.elasticsearch.search.simple; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchPhaseExecutionException; import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.Test; import java.util.ArrayList; @@ -49,14 +46,14 @@ public class SimpleSearchTests extends ElasticsearchIntegrationTest { try { client().prepareSearch((String) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).execute().actionGet(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } try { client().prepareSearch((String[]) null).setQuery(QueryBuilders.termQuery("_id", "XXX1")).execute().actionGet(); fail(); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { } } @@ -234,7 +231,7 @@ public class SimpleSearchTests extends ElasticsearchIntegrationTest { client().prepareSearch("idx").setFrom(Integer.MAX_VALUE).get(); fail(); } catch (SearchPhaseExecutionException e) { - assertThat(e.getMessage(), containsString("Result window is too large, from + size must be less than or equal to:")); + assertThat(e.toString(), containsString("Result window is too large, from + size must be less than or equal to:")); } } } diff --git a/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java b/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java index b40e5547b1a..c048e1f5ed1 100644 --- a/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java +++ b/src/test/java/org/elasticsearch/search/sort/SimpleSortTests.java @@ -20,8 +20,6 @@ package org.elasticsearch.search.sort; -import com.carrotsearch.randomizedtesting.annotations.Repeat; - import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; @@ -134,7 +132,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest { ensureYellow(); SearchResponse allDocsResponse = client().prepareSearch().setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.boolFilter().must(FilterBuilders.termFilter("foo", "bar"), - FilterBuilders.rangeFilter("timeUpdated").gte("2014/0" + randomIntBetween(1, 7) + "/01").cache(randomBoolean())))) + FilterBuilders.rangeFilter("timeUpdated").gte("2014/0" + randomIntBetween(1, 7) + "/01")))) .addSort(new FieldSortBuilder("timeUpdated").order(SortOrder.ASC).unmappedType("date")) .setSize(docs).get(); assertSearchResponse(allDocsResponse); @@ -143,7 +141,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest { for (int i = 0; i < numiters; i++) { SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.filteredQuery(matchAllQuery(), FilterBuilders.boolFilter().must(FilterBuilders.termFilter("foo", "bar"), - 
FilterBuilders.rangeFilter("timeUpdated").gte("2014/" + String.format(Locale.ROOT, "%02d", randomIntBetween(1, 7)) + "/01").cache(randomBoolean())))) + FilterBuilders.rangeFilter("timeUpdated").gte("2014/" + String.format(Locale.ROOT, "%02d", randomIntBetween(1, 7)) + "/01")))) .addSort(new FieldSortBuilder("timeUpdated").order(SortOrder.ASC).unmappedType("date")) .setSize(scaledRandomIntBetween(1, docs)).get(); assertSearchResponse(searchResponse); @@ -214,7 +212,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest { } } - public void testRandomSorting() throws ElasticsearchException, IOException, InterruptedException, ExecutionException { + public void testRandomSorting() throws IOException, InterruptedException, ExecutionException { Random random = getRandom(); assertAcked(prepareCreate("test") .addMapping("type", @@ -1023,7 +1021,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest { } @Test @Slow - public void testSortMissingStrings() throws ElasticsearchException, IOException { + public void testSortMissingStrings() throws IOException { assertAcked(prepareCreate("test").addMapping("type1", XContentFactory.jsonBuilder() .startObject() @@ -1131,7 +1129,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest { } catch (SearchPhaseExecutionException e) { //we check that it's a parse failure rather than a different shard failure for (ShardSearchFailure shardSearchFailure : e.shardFailures()) { - assertThat(shardSearchFailure.reason(), containsString("Parse Failure [No mapping found for [kkk] in order to sort on]")); + assertThat(shardSearchFailure.toString(), containsString("[No mapping found for [kkk] in order to sort on]")); } } @@ -1458,7 +1456,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest { } @Test - public void testSortOnRareField() throws ElasticsearchException, IOException { + public void testSortOnRareField() throws IOException { assertAcked(prepareCreate("test") .addMapping("type1", XContentFactory.jsonBuilder().startObject().startObject("type1").startObject("properties") .startObject("string_values").field("type", "string").field("index", "not_analyzed").startObject("fielddata").field("format", random().nextBoolean() ? 
"doc_values" : null).endObject().endObject() @@ -1626,7 +1624,7 @@ public class SimpleSortTests extends ElasticsearchIntegrationTest { * Test case for issue 6150: https://github.com/elasticsearch/elasticsearch/issues/6150 */ @Test - public void testNestedSort() throws ElasticsearchException, IOException, InterruptedException, ExecutionException { + public void testNestedSort() throws IOException, InterruptedException, ExecutionException { assertAcked(prepareCreate("test") .addMapping("type", XContentFactory.jsonBuilder() diff --git a/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java b/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java index bfc9684cb18..55e30467863 100644 --- a/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java +++ b/src/test/java/org/elasticsearch/search/stats/SearchStatsTests.java @@ -46,6 +46,7 @@ import static org.hamcrest.Matchers.*; /** */ +@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2) public class SearchStatsTests extends ElasticsearchIntegrationTest { @Override diff --git a/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java b/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java index b2528428297..f4c7c74933f 100644 --- a/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java +++ b/src/test/java/org/elasticsearch/search/suggest/CompletionSuggestSearchTests.java @@ -23,7 +23,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomStrings; import com.google.common.collect.Lists; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse; import org.elasticsearch.action.admin.indices.optimize.OptimizeResponse; import org.elasticsearch.action.admin.indices.segments.IndexShardSegments; @@ -50,7 +49,6 @@ import org.elasticsearch.search.suggest.completion.CompletionSuggestion; import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; import org.elasticsearch.search.suggest.completion.CompletionSuggestionFuzzyBuilder; import org.elasticsearch.test.ElasticsearchIntegrationTest; -import org.elasticsearch.test.hamcrest.ElasticsearchAssertions; import org.junit.Test; import java.io.IOException; @@ -177,7 +175,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest { ).get(); fail("Indexing with a float weight was successful, but should not be"); } catch (MapperParsingException e) { - assertThat(ExceptionsHelper.detailedMessage(e), containsString("2.5")); + assertThat(e.toString(), containsString("2.5")); } } @@ -221,7 +219,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest { ).get(); fail("Indexing with a non-number representing string as weight was successful, but should not be"); } catch (MapperParsingException e) { - assertThat(ExceptionsHelper.detailedMessage(e), containsString("thisIsNotValid")); + assertThat(e.toString(), containsString("thisIsNotValid")); } } @@ -239,7 +237,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest { ).get(); fail("Indexing with weight string representing value > Int.MAX_VALUE was successful, but should not be"); } catch (MapperParsingException e) { - assertThat(ExceptionsHelper.detailedMessage(e), containsString(weight)); + assertThat(e.toString(), containsString(weight)); } } @@ -774,7 +772,7 @@ public class CompletionSuggestSearchTests extends 
ElasticsearchIntegrationTest { fail("Expected an exception due to trying to sort on completion field, but did not happen"); } catch (SearchPhaseExecutionException e) { assertThat(e.status().getStatus(), is(400)); - assertThat(e.getMessage(), containsString("Sorting not supported for field[" + FIELD + "]")); + assertThat(e.toString(), containsString("Sorting not supported for field[" + FIELD + "]")); } } @@ -1096,7 +1094,7 @@ public class CompletionSuggestSearchTests extends ElasticsearchIntegrationTest { // Exception must be thrown assertFalse(true); } catch (SearchPhaseExecutionException e) { - assertTrue(e.getDetailedMessage().contains("found no fielddata type for field [" + FIELD + "]")); + assertTrue(e.toString().contains("found no fielddata type for field [" + FIELD + "]")); } } diff --git a/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearchTests.java b/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearchTests.java index b3d8eeeb9bc..a1f95a229cd 100644 --- a/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearchTests.java +++ b/src/test/java/org/elasticsearch/search/suggest/ContextSuggestSearchTests.java @@ -20,15 +20,12 @@ package org.elasticsearch.search.suggest; import com.google.common.collect.Sets; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.suggest.SuggestRequest; import org.elasticsearch.action.suggest.SuggestRequestBuilder; import org.elasticsearch.action.suggest.SuggestResponse; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.geo.GeoHashUtils; import org.elasticsearch.common.geo.GeoPoint; -import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.mapper.MapperParsingException; @@ -602,7 +599,7 @@ public class ContextSuggestSearchTests extends ElasticsearchIntegrationTest { try { index(INDEX, "service", "2", jsonBuilder().startObject().startObject("suggest").field("input", "backback").endObject().endObject()); fail("index operation was not supposed to be successful"); - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { assertThat(e.getMessage(), containsString("one or more prefixes needed")); } } diff --git a/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java b/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java index 6f512a29c81..86d979df31c 100644 --- a/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java +++ b/src/test/java/org/elasticsearch/search/suggest/SuggestSearchTests.java @@ -19,13 +19,10 @@ package org.elasticsearch.search.suggest; -import com.carrotsearch.randomizedtesting.annotations.Nightly; import com.google.common.base.Charsets; import com.google.common.collect.ImmutableList; import com.google.common.io.Resources; -import org.apache.lucene.util.LuceneTestCase.Slow; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.*; @@ -124,11 +121,11 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest { searchSuggest( termSuggest); fail(" can not suggest across multiple 
indices with different analysis chains"); } catch (ReduceSearchPhaseException ex) { - assertThat(ex.getCause(), instanceOf(ElasticsearchIllegalStateException.class)); + assertThat(ex.getCause(), instanceOf(IllegalStateException.class)); assertThat(ex.getCause().getMessage(), anyOf(endsWith("Suggest entries have different sizes actual [1] expected [2]"), endsWith("Suggest entries have different sizes actual [2] expected [1]"))); - } catch (ElasticsearchIllegalStateException ex) { + } catch (IllegalStateException ex) { assertThat(ex.getMessage(), anyOf(endsWith("Suggest entries have different sizes actual [1] expected [2]"), endsWith("Suggest entries have different sizes actual [2] expected [1]"))); } @@ -144,10 +141,10 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest { searchSuggest( termSuggest); fail(" can not suggest across multiple indices with different analysis chains"); } catch (ReduceSearchPhaseException ex) { - assertThat(ex.getCause(), instanceOf(ElasticsearchIllegalStateException.class)); + assertThat(ex.getCause(), instanceOf(IllegalStateException.class)); assertThat(ex.getCause().getMessage(), anyOf(endsWith("Suggest entries have different text actual [ABCD] expected [abcd]"), endsWith("Suggest entries have different text actual [abcd] expected [ABCD]"))); - } catch (ElasticsearchIllegalStateException ex) { + } catch (IllegalStateException ex) { assertThat(ex.getMessage(), anyOf(endsWith("Suggest entries have different text actual [ABCD] expected [abcd]"), endsWith("Suggest entries have different text actual [abcd] expected [ABCD]"))); } @@ -412,7 +409,7 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest { } @Test // see #2817 - public void testStopwordsOnlyPhraseSuggest() throws ElasticsearchException, IOException { + public void testStopwordsOnlyPhraseSuggest() throws IOException { assertAcked(prepareCreate("test").addMapping("typ1", "body", "type=string,analyzer=stopwd").setSettings( settingsBuilder() .put("index.analysis.analyzer.stopwd.tokenizer", "whitespace") @@ -430,7 +427,7 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest { } @Test - public void testPrefixLength() throws ElasticsearchException, IOException { // Stopped here + public void testPrefixLength() throws IOException { // Stopped here CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder() .put(SETTING_NUMBER_OF_SHARDS, 1) .put("index.analysis.analyzer.reverse.tokenizer", "standard") @@ -475,7 +472,7 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest { @Test @Slow @Nightly - public void testMarvelHerosPhraseSuggest() throws ElasticsearchException, IOException { + public void testMarvelHerosPhraseSuggest() throws IOException { CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder() .put(indexSettings()) .put("index.analysis.analyzer.reverse.tokenizer", "standard") @@ -671,7 +668,7 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest { @Test @Nightly - public void testPhraseBoundaryCases() throws ElasticsearchException, IOException { + public void testPhraseBoundaryCases() throws IOException { CreateIndexRequestBuilder builder = prepareCreate("test").setSettings(settingsBuilder() .put(indexSettings()).put(SETTING_NUMBER_OF_SHARDS, 1) // to get reliable statistics we should put this all into one shard .put("index.analysis.analyzer.body.tokenizer", "standard") @@ -878,7 +875,7 @@ public class SuggestSearchTests extends ElasticsearchIntegrationTest 
{ * score during the reduce phase. Failures don't occur every time - maybe two out of five tries but we don't repeat it to save time. */ @Test - public void testSearchForRarePhrase() throws ElasticsearchException, IOException { + public void testSearchForRarePhrase() throws IOException { // If there isn't enough chaf per shard then shards can become unbalanced, making the cutoff recheck this is testing do more harm than good. int chafPerShard = 100; diff --git a/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java b/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java index c9abce165fc..2ef5d279c10 100644 --- a/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java +++ b/src/test/java/org/elasticsearch/search/timeout/SearchTimeoutTests.java @@ -22,7 +22,6 @@ package org.elasticsearch.search.timeout; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.script.groovy.GroovyScriptEngineService; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -38,7 +37,7 @@ public class SearchTimeoutTests extends ElasticsearchIntegrationTest { @Override protected Settings nodeSettings(int nodeOrdinal) { - return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).put(GroovyScriptEngineService.GROOVY_SCRIPT_SANDBOX_ENABLED, false).build(); + return ImmutableSettings.settingsBuilder().put(super.nodeSettings(nodeOrdinal)).build(); } @Test diff --git a/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java b/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java index 6cbff427afb..8d569275aea 100644 --- a/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java +++ b/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreTests.java @@ -21,11 +21,9 @@ package org.elasticsearch.snapshots; import com.carrotsearch.hppc.IntOpenHashSet; import com.carrotsearch.hppc.IntSet; -import com.carrotsearch.randomizedtesting.LifecycleScope; import com.google.common.base.Predicate; import com.google.common.collect.ImmutableList; import com.google.common.util.concurrent.ListenableFuture; -import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.action.ListenableActionFuture; import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; @@ -40,7 +38,9 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterService; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ProcessedClusterStateUpdateTask; +import org.elasticsearch.cluster.AbstractDiffable; import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.MetaData.Custom; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Priority; import org.elasticsearch.common.io.stream.StreamInput; @@ -52,7 +52,7 @@ import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.discovery.zen.elect.ElectMasterService; -import org.elasticsearch.index.store.support.AbstractIndexStore; +import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.ttl.IndicesTTLService; import
org.elasticsearch.repositories.RepositoryMissingException; import org.elasticsearch.snapshots.mockstore.MockRepositoryModule; @@ -699,7 +699,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests logger.info("--> update index settings to back to normal"); assertAcked(client().admin().indices().prepareUpdateSettings("test-*").setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "node") + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "node") )); // Make sure that snapshot finished - doesn't matter if it failed or succeeded @@ -745,12 +745,12 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests } assertAcked(client().admin().indices().prepareUpdateSettings(name).setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "all") - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, between(100, 50000)) + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "all") + .put(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, between(100, 50000)) )); } - public static abstract class TestCustomMetaData implements MetaData.Custom { + public static abstract class TestCustomMetaData extends AbstractDiffable implements MetaData.Custom { private final String data; protected TestCustomMetaData(String data) { @@ -778,194 +778,182 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests return data.hashCode(); } - public static abstract class TestCustomMetaDataFactory extends MetaData.Custom.Factory { + protected abstract TestCustomMetaData newTestCustomMetaData(String data); - protected abstract TestCustomMetaData newTestCustomMetaData(String data); + @Override + public Custom readFrom(StreamInput in) throws IOException { + return newTestCustomMetaData(in.readString()); + } - @Override - public T readFrom(StreamInput in) throws IOException { - return (T) newTestCustomMetaData(in.readString()); - } + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(getData()); + } - @Override - public void writeTo(T metadata, StreamOutput out) throws IOException { - out.writeString(metadata.getData()); - } - - @Override - public T fromXContent(XContentParser parser) throws IOException { - XContentParser.Token token; - String data = null; - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - if (token == XContentParser.Token.FIELD_NAME) { - String currentFieldName = parser.currentName(); - if ("data".equals(currentFieldName)) { - if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, invalid data type"); - } - data = parser.text(); - } else { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, unknown field [" + currentFieldName + "]"); + @Override + public Custom fromXContent(XContentParser parser) throws IOException { + XContentParser.Token token; + String data = null; + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + if (token == XContentParser.Token.FIELD_NAME) { + String currentFieldName = parser.currentName(); + if ("data".equals(currentFieldName)) { + if (parser.nextToken() != XContentParser.Token.VALUE_STRING) { + throw new ElasticsearchParseException("failed to parse snapshottable metadata, invalid data type"); } + data = parser.text(); } else { - throw new ElasticsearchParseException("failed to parse snapshottable metadata"); + throw new 
ElasticsearchParseException("failed to parse snapshottable metadata, unknown field [" + currentFieldName + "]"); } + } else { + throw new ElasticsearchParseException("failed to parse snapshottable metadata"); } - if (data == null) { - throw new ElasticsearchParseException("failed to parse snapshottable metadata, data not found"); - } - return (T) newTestCustomMetaData(data); } + if (data == null) { + throw new ElasticsearchParseException("failed to parse snapshottable metadata, data not found"); + } + return newTestCustomMetaData(data); + } - @Override - public void toXContent(T metadata, XContentBuilder builder, ToXContent.Params params) throws IOException { - builder.field("data", metadata.getData()); - } + @Override + public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException { + builder.field("data", getData()); + return builder; } } + static { - MetaData.registerFactory(SnapshottableMetadata.TYPE, SnapshottableMetadata.FACTORY); - MetaData.registerFactory(NonSnapshottableMetadata.TYPE, NonSnapshottableMetadata.FACTORY); - MetaData.registerFactory(SnapshottableGatewayMetadata.TYPE, SnapshottableGatewayMetadata.FACTORY); - MetaData.registerFactory(NonSnapshottableGatewayMetadata.TYPE, NonSnapshottableGatewayMetadata.FACTORY); - MetaData.registerFactory(SnapshotableGatewayNoApiMetadata.TYPE, SnapshotableGatewayNoApiMetadata.FACTORY); + MetaData.registerPrototype(SnapshottableMetadata.TYPE, SnapshottableMetadata.PROTO); + MetaData.registerPrototype(NonSnapshottableMetadata.TYPE, NonSnapshottableMetadata.PROTO); + MetaData.registerPrototype(SnapshottableGatewayMetadata.TYPE, SnapshottableGatewayMetadata.PROTO); + MetaData.registerPrototype(NonSnapshottableGatewayMetadata.TYPE, NonSnapshottableGatewayMetadata.PROTO); + MetaData.registerPrototype(SnapshotableGatewayNoApiMetadata.TYPE, SnapshotableGatewayNoApiMetadata.PROTO); } public static class SnapshottableMetadata extends TestCustomMetaData { public static final String TYPE = "test_snapshottable"; - public static final Factory FACTORY = new Factory(); + public static final SnapshottableMetadata PROTO = new SnapshottableMetadata(""); public SnapshottableMetadata(String data) { super(data); } - private static class Factory extends TestCustomMetaDataFactory { + @Override + public String type() { + return TYPE; + } - @Override - public String type() { - return TYPE; - } + @Override + protected TestCustomMetaData newTestCustomMetaData(String data) { + return new SnapshottableMetadata(data); + } - @Override - protected TestCustomMetaData newTestCustomMetaData(String data) { - return new SnapshottableMetadata(data); - } - - @Override - public EnumSet context() { - return MetaData.API_AND_SNAPSHOT; - } + @Override + public EnumSet context() { + return MetaData.API_AND_SNAPSHOT; } } public static class NonSnapshottableMetadata extends TestCustomMetaData { public static final String TYPE = "test_non_snapshottable"; - public static final Factory FACTORY = new Factory(); + public static final NonSnapshottableMetadata PROTO = new NonSnapshottableMetadata(""); public NonSnapshottableMetadata(String data) { super(data); } - private static class Factory extends TestCustomMetaDataFactory { + @Override + public String type() { + return TYPE; + } - @Override - public String type() { - return TYPE; - } + @Override + protected NonSnapshottableMetadata newTestCustomMetaData(String data) { + return new NonSnapshottableMetadata(data); + } - @Override - protected NonSnapshottableMetadata newTestCustomMetaData(String 
data) { - return new NonSnapshottableMetadata(data); - } + @Override + public EnumSet context() { + return MetaData.API_ONLY; } } public static class SnapshottableGatewayMetadata extends TestCustomMetaData { public static final String TYPE = "test_snapshottable_gateway"; - public static final Factory FACTORY = new Factory(); + public static final SnapshottableGatewayMetadata PROTO = new SnapshottableGatewayMetadata(""); public SnapshottableGatewayMetadata(String data) { super(data); } - private static class Factory extends TestCustomMetaDataFactory { + @Override + public String type() { + return TYPE; + } - @Override - public String type() { - return TYPE; - } + @Override + protected TestCustomMetaData newTestCustomMetaData(String data) { + return new SnapshottableGatewayMetadata(data); + } - @Override - protected TestCustomMetaData newTestCustomMetaData(String data) { - return new SnapshottableGatewayMetadata(data); - } - - @Override - public EnumSet context() { - return EnumSet.of(MetaData.XContentContext.API, MetaData.XContentContext.SNAPSHOT, MetaData.XContentContext.GATEWAY); - } + @Override + public EnumSet context() { + return EnumSet.of(MetaData.XContentContext.API, MetaData.XContentContext.SNAPSHOT, MetaData.XContentContext.GATEWAY); } } public static class NonSnapshottableGatewayMetadata extends TestCustomMetaData { public static final String TYPE = "test_non_snapshottable_gateway"; - public static final Factory FACTORY = new Factory(); + public static final NonSnapshottableGatewayMetadata PROTO = new NonSnapshottableGatewayMetadata(""); public NonSnapshottableGatewayMetadata(String data) { super(data); } - private static class Factory extends TestCustomMetaDataFactory { - - @Override - public String type() { - return TYPE; - } - - @Override - protected NonSnapshottableGatewayMetadata newTestCustomMetaData(String data) { - return new NonSnapshottableGatewayMetadata(data); - } - - @Override - public EnumSet context() { - return MetaData.API_AND_GATEWAY; - } - + @Override + public String type() { + return TYPE; } + + @Override + protected NonSnapshottableGatewayMetadata newTestCustomMetaData(String data) { + return new NonSnapshottableGatewayMetadata(data); + } + + @Override + public EnumSet context() { + return MetaData.API_AND_GATEWAY; + } + } public static class SnapshotableGatewayNoApiMetadata extends TestCustomMetaData { public static final String TYPE = "test_snapshottable_gateway_no_api"; - public static final Factory FACTORY = new Factory(); + public static final SnapshotableGatewayNoApiMetadata PROTO = new SnapshotableGatewayNoApiMetadata(""); public SnapshotableGatewayNoApiMetadata(String data) { super(data); } - private static class Factory extends TestCustomMetaDataFactory { + @Override + public String type() { + return TYPE; + } - @Override - public String type() { - return TYPE; - } - - @Override - protected SnapshotableGatewayNoApiMetadata newTestCustomMetaData(String data) { - return new SnapshotableGatewayNoApiMetadata(data); - } - - @Override - public EnumSet context() { - return EnumSet.of(MetaData.XContentContext.GATEWAY, MetaData.XContentContext.SNAPSHOT); - } + @Override + protected SnapshotableGatewayNoApiMetadata newTestCustomMetaData(String data) { + return new SnapshotableGatewayNoApiMetadata(data); + } + @Override + public EnumSet context() { + return EnumSet.of(MetaData.XContentContext.GATEWAY, MetaData.XContentContext.SNAPSHOT); } } diff --git a/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java 
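The block above completes the factory-to-prototype migration for custom cluster metadata: each type now extends AbstractDiffable, exposes a PROTO instance, and is registered via MetaData.registerPrototype instead of a nested Factory. A minimal sketch of the resulting shape (the "my_custom" type name is illustrative, and the EnumSet type parameter, elided by the diff formatting above, is assumed to be MetaData.XContentContext):

    public class MyCustomMetaData extends TestCustomMetaData {
        public static final String TYPE = "my_custom";
        // a single prototype instance replaces the old per-type Factory
        public static final MyCustomMetaData PROTO = new MyCustomMetaData("");

        public MyCustomMetaData(String data) {
            super(data);
        }

        @Override
        public String type() {
            return TYPE;
        }

        @Override
        protected TestCustomMetaData newTestCustomMetaData(String data) {
            return new MyCustomMetaData(data);
        }

        @Override
        public EnumSet<MetaData.XContentContext> context() {
            return MetaData.API_AND_SNAPSHOT;
        }
    }

    // registered once, mirroring the static block above:
    // MetaData.registerPrototype(MyCustomMetaData.TYPE, MyCustomMetaData.PROTO);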
b/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java index 8f86ec572a0..171ce1c3481 100644 --- a/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java +++ b/src/test/java/org/elasticsearch/snapshots/RepositoriesTests.java @@ -36,6 +36,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.repositories.RepositoryVerificationException; import org.elasticsearch.snapshots.mockstore.MockRepositoryModule; +import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; import java.nio.file.Path; @@ -48,6 +49,7 @@ import static org.hamcrest.Matchers.notNullValue; /** */ +@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 2) public class RepositoriesTests extends AbstractSnapshotTests { @Test diff --git a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java index 947527376b4..0ca85672ec9 100644 --- a/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java +++ b/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreTests.java @@ -52,11 +52,10 @@ import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.store.support.AbstractIndexStore; +import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.InvalidIndexNameException; import org.elasticsearch.repositories.RepositoriesService; import org.elasticsearch.snapshots.mockstore.MockRepositoryModule; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.junit.Test; import java.nio.channels.SeekableByteChannel; @@ -1486,8 +1485,8 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests { // Update settings to make sure that relocation is slow so we can start snapshot before relocation is finished assertAcked(client.admin().indices().prepareUpdateSettings("test-idx").setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "all") - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, 100) + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "all") + .put(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, 100) )); logger.info("--> start relocations"); @@ -1502,7 +1501,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests { // Update settings to back to normal assertAcked(client.admin().indices().prepareUpdateSettings("test-idx").setSettings(ImmutableSettings.builder() - .put(AbstractIndexStore.INDEX_STORE_THROTTLE_TYPE, "node") + .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "node") )); logger.info("--> wait for snapshot to complete"); @@ -1637,6 +1636,17 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests { .setIndexSettings(newIncorrectIndexSettings) .setWaitForCompletion(true), SnapshotRestoreException.class); + logger.info("--> try restoring while changing the number of replicas to a negative number - should fail"); + Settings newIncorrectReplicasIndexSettings = ImmutableSettings.builder() + .put(newIndexSettings) + .put(SETTING_NUMBER_OF_REPLICAS.substring(IndexMetaData.INDEX_SETTING_PREFIX.length()), randomIntBetween(-10, -1)) + .build(); + assertThrows(client.admin().cluster() + .prepareRestoreSnapshot("test-repo", "test-snap") + 
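The AbstractIndexStore-to-IndexStore rename above touches the throttle settings these tests use to slow shard I/O long enough to start a snapshot while shards are still relocating. A condensed sketch of that pattern, with illustrative values:

    // Throttle all index store I/O so relocation outlives the snapshot start:
    assertAcked(client.admin().indices().prepareUpdateSettings("test-idx")
            .setSettings(ImmutableSettings.builder()
                    .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "all")
                    .put(IndexStore.INDEX_STORE_THROTTLE_MAX_BYTES_PER_SEC, 100)));

    // ... start relocations, then the snapshot ...

    // Return to node-level throttling once the snapshot is underway:
    assertAcked(client.admin().indices().prepareUpdateSettings("test-idx")
            .setSettings(ImmutableSettings.builder()
                    .put(IndexStore.INDEX_STORE_THROTTLE_TYPE, "node")));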
.setIgnoreIndexSettings("index.analysis.*") + .setIndexSettings(newIncorrectReplicasIndexSettings) + .setWaitForCompletion(true), IllegalArgumentException.class); + logger.info("--> restore index with correct settings from the snapshot"); RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster() .prepareRestoreSnapshot("test-repo", "test-snap") diff --git a/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index fe2eb17f654..af3fc057d29 100644 --- a/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -123,7 +123,7 @@ public class MockRepository extends FsRepository { } @Override - protected void doStop() throws ElasticsearchException { + protected void doStop() { unblock(); super.doStop(); } diff --git a/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java index 45cbd022f1c..57bcd69b91c 100644 --- a/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java +++ b/src/test/java/org/elasticsearch/stresstest/manyindices/ManyNodesManyIndicesRecoveryStressTest.java @@ -72,7 +72,9 @@ public class ManyNodesManyIndicesRecoveryStressTest { } System.out.println("--> Initiating shutdown"); - client.admin().cluster().prepareNodesShutdown().setExit(false).execute().actionGet(); + for (Node node : nodes) { + node.close(); + } System.out.println("--> Waiting for all nodes to be closed..."); while (true) { diff --git a/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java b/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java index a677acdcc63..ad03d23e295 100644 --- a/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java +++ b/src/test/java/org/elasticsearch/stresstest/search1/Search1StressTest.java @@ -63,7 +63,6 @@ public class Search1StressTest { private int numberOfValues = 20; private int numberOfHits = 300; private TimeValue flusherThrottle = TimeValue.timeValueMillis(1000); - private TimeValue deleteByQueryThrottle = TimeValue.timeValueMillis(5000); private Settings settings = ImmutableSettings.Builder.EMPTY_SETTINGS; @@ -130,11 +129,6 @@ public class Search1StressTest { return this; } - public Search1StressTest setDeleteByQueryThrottle(TimeValue deleteByQueryThrottle) { - this.deleteByQueryThrottle = deleteByQueryThrottle; - return this; - } - public Search1StressTest setSettings(Settings settings) { this.settings = settings; return this; @@ -264,28 +258,6 @@ public class Search1StressTest { } } - private class DeleteByQuery extends Thread { - volatile boolean close = false; - - volatile boolean closed = false; - - @Override - public void run() { - while (true) { - if (close) { - closed = true; - return; - } - try { - client.client().prepareDeleteByQuery().setQuery(termQuery("num", nextNumValue())).execute().actionGet(); - Thread.sleep(deleteByQueryThrottle.millis()); - } catch (Exception e) { - logger.warn("failed to delete_by_query", e); - } - } - } - } - private void indexDoc() throws Exception { XContentBuilder json = XContentFactory.jsonBuilder().startObject() .field("num", nextNumValue()) @@ -340,13 +312,6 @@ public class Search1StressTest { flusher.start(); } - DeleteByQuery deleteByQuery = null; - if (deleteByQueryThrottle.millis() > 
0) { - deleteByQuery = new DeleteByQuery(); - deleteByQuery.start(); - } - - long testStart = System.currentTimeMillis(); while (true) { @@ -362,10 +327,6 @@ public class Search1StressTest { flusher.close = true; } - if (deleteByQuery != null) { - deleteByQuery.close = true; - } - for (Searcher searcherThread : searcherThreads) { searcherThread.close = true; } @@ -379,9 +340,6 @@ public class Search1StressTest { if (flusher != null && !flusher.closed) { logger.warn("flusher not closed!"); } - if (deleteByQuery != null && !deleteByQuery.closed) { - logger.warn("deleteByQuery not closed!"); - } for (Searcher searcherThread : searcherThreads) { if (!searcherThread.closed) { logger.warn("search thread not closed!"); @@ -410,7 +368,6 @@ public class Search1StressTest { .setIndexerThrottle(TimeValue.timeValueMillis(100)) .setSearchers(10) .setSearcherThrottle(TimeValue.timeValueMillis(10)) - .setDeleteByQueryThrottle(TimeValue.timeValueMillis(-1)) .setFlusherThrottle(TimeValue.timeValueMillis(1000)) .setNumberOfIndices(10) .setNumberOfTypes(5) diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java index b2c1f87b757..109bd030023 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java @@ -114,7 +114,6 @@ import org.elasticsearch.index.translog.TranslogService; import org.elasticsearch.index.translog.fs.FsTranslog; import org.elasticsearch.index.translog.fs.FsTranslogFile; import org.elasticsearch.indices.IndicesService; -import org.elasticsearch.indices.cache.filter.IndicesFilterCache; import org.elasticsearch.indices.cache.query.IndicesQueryCache; import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; import org.elasticsearch.indices.recovery.RecoverySettings; @@ -166,6 +165,8 @@ import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS; import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; +import static org.elasticsearch.test.XContentTestUtils.convertToMap; +import static org.elasticsearch.test.XContentTestUtils.mapsEqualIgnoringArrayOrder; import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; @@ -357,7 +358,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase * Creates a randomized index template. This template is used to pass in randomized settings on a * per index basis. Allows to enable/disable the randomization for number of shards and replicas */ - private void randomIndexTemplate() throws IOException { + public void randomIndexTemplate() throws IOException { // TODO move settings for random directory etc here into the index based randomized settings. if (cluster().size() > 0) { @@ -487,9 +488,6 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase builder.put(IndicesStore.INDICES_STORE_THROTTLE_TYPE, RandomPicks.randomFrom(random, StoreRateLimiting.Type.values())); } - if (random.nextBoolean()) { - builder.put(StoreModule.DISTIBUTOR_KEY, random.nextBoolean() ? 
StoreModule.LEAST_USED_DISTRIBUTOR : StoreModule.RANDOM_WEIGHT_DISTRIBUTOR); - } if (random.nextBoolean()) { builder.put(ConcurrentMergeSchedulerProvider.AUTO_THROTTLE, false); } @@ -521,7 +519,6 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase if (random.nextBoolean()) { builder.put(IndicesQueryCache.INDICES_CACHE_QUERY_CONCURRENCY_LEVEL, RandomInts.randomIntBetween(random, 1, 32)); builder.put(IndicesFieldDataCache.FIELDDATA_CACHE_CONCURRENCY_LEVEL, RandomInts.randomIntBetween(random, 1, 32)); - builder.put(IndicesFilterCache.INDICES_CACHE_FILTER_CONCURRENCY_LEVEL, RandomInts.randomIntBetween(random, 1, 32)); } if (random.nextBoolean()) { builder.put(NettyTransport.PING_SCHEDULE, RandomInts.randomIntBetween(random, 100, 2000) + "ms"); @@ -650,6 +647,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase .transientSettings().getAsMap().size(), equalTo(0)); } ensureClusterSizeConsistency(); + ensureClusterStateConsistency(); cluster().wipe(); // wipe after to make sure we fail in the test that didn't ack the delete if (afterClass || currentClusterScope == Scope.TEST) { cluster().close(); @@ -1088,8 +1086,8 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase */ public void setMinimumMasterNodes(int n) { assertTrue(client().admin().cluster().prepareUpdateSettings().setTransientSettings( - settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, n)) - .get().isAcknowledged()); + settingsBuilder().put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES, n)) + .get().isAcknowledged()); } /** @@ -1136,6 +1134,50 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase } } + /** + * Verifies that all nodes that have the same version of the cluster state as the master have the same cluster state + */ + protected void ensureClusterStateConsistency() throws IOException { + if (cluster() != null) { + boolean getResolvedAddress = InetSocketTransportAddress.getResolveAddress(); + try { + InetSocketTransportAddress.setResolveAddress(false); + ClusterState masterClusterState = client().admin().cluster().prepareState().all().get().getState(); + byte[] masterClusterStateBytes = ClusterState.Builder.toBytes(masterClusterState); + // remove local node reference + masterClusterState = ClusterState.Builder.fromBytes(masterClusterStateBytes, null); + Map masterStateMap = convertToMap(masterClusterState); + int masterClusterStateSize = ClusterState.Builder.toBytes(masterClusterState).length; + String masterId = masterClusterState.nodes().masterNodeId(); + for (Client client : cluster()) { + ClusterState localClusterState = client.admin().cluster().prepareState().all().setLocal(true).get().getState(); + byte[] localClusterStateBytes = ClusterState.Builder.toBytes(localClusterState); + // remove local node reference + localClusterState = ClusterState.Builder.fromBytes(localClusterStateBytes, null); + Map localStateMap = convertToMap(localClusterState); + int localClusterStateSize = localClusterStateBytes.length; + // Check that the non-master node has the same version of the cluster state as the master and that this node didn't disconnect from the master + if (masterClusterState.version() == localClusterState.version() && localClusterState.nodes().nodes().containsKey(masterId)) { + try { + assertThat(masterClusterState.uuid(), equalTo(localClusterState.uuid())); + // We cannot compare serialization bytes since serialization order of maps is not guaranteed + // but we can
compare serialization sizes - they should be the same + assertThat(masterClusterStateSize, equalTo(localClusterStateSize)); + // Compare JSON serialization + assertThat(mapsEqualIgnoringArrayOrder(masterStateMap, localStateMap), equalTo(true)); + } catch (AssertionError error) { + logger.error("Cluster state from master:\n{}\nLocal cluster state:\n{}", masterClusterState.toString(), localClusterState.toString()); + throw error; + } + } + } + } finally { + InetSocketTransportAddress.setResolveAddress(getResolvedAddress); + } + } + + } + /** * Ensures the cluster is in a searchable state for the given indices. This means a searchable copy of each * shard is available on the cluster. @@ -1421,6 +1463,24 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get(); } + /** Disables an index block for the specified index */ + public static void disableIndexBlock(String index, String block) { + Settings settings = ImmutableSettings.builder().put(block, false).build(); + client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get(); + } + + /** Enables an index block for the specified index */ + public static void enableIndexBlock(String index, String block) { + Settings settings = ImmutableSettings.builder().put(block, true).build(); + client().admin().indices().prepareUpdateSettings(index).setSettings(settings).get(); + } + + /** Sets or unsets the cluster read_only mode **/ + public static void setClusterReadOnly(boolean value) { + Settings settings = settingsBuilder().put(MetaData.SETTING_READ_ONLY, value).build(); + assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings).get()); + } + private static CountDownLatch newLatch(List latches) { CountDownLatch l = new CountDownLatch(1); latches.add(l); @@ -1485,16 +1545,16 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase int numDataNodes() default -1; /** - * Returns the minimum number of nodes in the cluster. Default is {@link InternalTestCluster#DEFAULT_MIN_NUM_DATA_NODES}. + * Returns the minimum number of nodes in the cluster. Default is -1. * Ignored when {@link ClusterScope#numDataNodes()} is set. */ - int minNumDataNodes() default InternalTestCluster.DEFAULT_MIN_NUM_DATA_NODES; + int minNumDataNodes() default -1; /** - * Returns the maximum number of nodes in the cluster. Default is {@link InternalTestCluster#DEFAULT_MAX_NUM_DATA_NODES}. + * Returns the maximum number of nodes in the cluster. Default is -1. * Ignored when {@link ClusterScope#numDataNodes()} is set. */ - int maxNumDataNodes() default InternalTestCluster.DEFAULT_MAX_NUM_DATA_NODES; + int maxNumDataNodes() default -1; /** * Returns the number of client nodes in the cluster. Default is {@link InternalTestCluster#DEFAULT_NUM_CLIENT_NODES}, a @@ -1595,12 +1655,12 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase private int getMinNumDataNodes() { ClusterScope annotation = getAnnotation(this.getClass()); - return annotation == null ? InternalTestCluster.DEFAULT_MIN_NUM_DATA_NODES : annotation.minNumDataNodes(); + return annotation == null || annotation.minNumDataNodes() == -1 ? InternalTestCluster.DEFAULT_MIN_NUM_DATA_NODES : annotation.minNumDataNodes(); } private int getMaxNumDataNodes() { ClusterScope annotation = getAnnotation(this.getClass()); - return annotation == null ? 
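The enableIndexBlock/disableIndexBlock/setClusterReadOnly helpers added above give tests a one-liner for toggling blocks. A usage sketch (a hypothetical test body; the block key is assumed to be IndexMetaData.SETTING_BLOCKS_WRITE, i.e. "index.blocks.write"):

    try {
        // assumption: SETTING_BLOCKS_WRITE is the block key the helpers expect
        enableIndexBlock("test", IndexMetaData.SETTING_BLOCKS_WRITE);
        // ... assert that writes against "test" are now rejected ...
    } finally {
        disableIndexBlock("test", IndexMetaData.SETTING_BLOCKS_WRITE);
    }

    // Cluster-wide read-only mode toggles the same way:
    setClusterReadOnly(true);
    // ... assert that cluster-level writes are rejected ...
    setClusterReadOnly(false);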
InternalTestCluster.DEFAULT_MAX_NUM_DATA_NODES : annotation.maxNumDataNodes(); + return annotation == null || annotation.maxNumDataNodes() == -1 ? InternalTestCluster.DEFAULT_MAX_NUM_DATA_NODES : annotation.maxNumDataNodes(); } private int getNumClientNodes() { diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java index 1f3d0eaa54b..02c02b2ed6e 100644 --- a/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java +++ b/src/test/java/org/elasticsearch/test/ElasticsearchTestCase.java @@ -28,9 +28,8 @@ import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; import com.carrotsearch.randomizedtesting.generators.RandomInts; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import com.carrotsearch.randomizedtesting.rules.TestRuleAdapter; import com.google.common.base.Predicate; - -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.uninverting.UninvertingReader; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.LuceneTestCase.SuppressCodecs; @@ -57,37 +56,27 @@ import org.elasticsearch.test.junit.listeners.LoggingListener; import org.elasticsearch.test.junit.listeners.ReproduceInfoPrinter; import org.elasticsearch.test.search.MockSearchService; import org.elasticsearch.threadpool.ThreadPool; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; +import org.junit.*; +import org.junit.rules.RuleChain; -import java.io.Closeable; import java.io.IOException; import java.lang.reflect.Field; import java.nio.file.FileSystem; import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Formatter; -import java.util.List; -import java.util.Locale; -import java.util.Map; -import java.util.Random; +import java.util.*; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllFilesClosed; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllSearchersClosed; +import static com.google.common.collect.Lists.newArrayList; /** * Base testcase for randomized unit testing with Elasticsearch */ @Listeners({ - ReproduceInfoPrinter.class, - LoggingListener.class + ReproduceInfoPrinter.class, + LoggingListener.class }) @ThreadLeakScope(Scope.SUITE) @ThreadLeakLingering(linger = 5000) // 5 sec lingering @@ -96,39 +85,50 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAllS // we suppress pretty much all the lucene codecs for now, except asserting // assertingcodec is the winner for a codec here: it finds bugs and gives clear exceptions. 
@SuppressCodecs({ - "SimpleText", "Memory", "CheapBastard", "Direct", "Compressing", "FST50", "FSTOrd50", - "TestBloomFilteredLucenePostings", "MockRandom", "BlockTreeOrds", "LuceneFixedGap", - "LuceneVarGapFixedInterval", "LuceneVarGapDocFreqInterval", "Lucene50" + "SimpleText", "Memory", "CheapBastard", "Direct", "Compressing", "FST50", "FSTOrd50", + "TestBloomFilteredLucenePostings", "MockRandom", "BlockTreeOrds", "LuceneFixedGap", + "LuceneVarGapFixedInterval", "LuceneVarGapDocFreqInterval", "Lucene50" }) @LuceneTestCase.SuppressReproduceLine public abstract class ElasticsearchTestCase extends LuceneTestCase { - + static { SecurityHack.ensureInitialized(); } - + protected final ESLogger logger = Loggers.getLogger(getClass()); // ----------------------------------------------------------------- // Suite and test case setup/cleanup. // ----------------------------------------------------------------- - // TODO: Parent/child and other things does not work with the query cache - // We must disable query cache for both suite and test to override lucene, but LTC resets it after the suite - - @BeforeClass - public static void disableQueryCacheSuite() { - IndexSearcher.setDefaultQueryCache(null); + @Rule + public RuleChain failureAndSuccessEvents = RuleChain.outerRule(new TestRuleAdapter() { + @Override + protected void afterIfSuccessful() throws Throwable { + ElasticsearchTestCase.this.afterIfSuccessful(); + } + + @Override + protected void afterAlways(List<Throwable> errors) throws Throwable { + if (errors != null && errors.isEmpty() == false) { + ElasticsearchTestCase.this.afterIfFailed(errors); + } + super.afterAlways(errors); + } + }); + + /** called when a test fails, supplying the errors it generated */ + protected void afterIfFailed(List<Throwable> errors) { } - - @Before - public final void disableQueryCache() { - IndexSearcher.setDefaultQueryCache(null); + + /** called after a test is finished, but only if successful */ + protected void afterIfSuccessful() { } - +
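As a usage sketch of these hooks (the subclass name and log message are hypothetical, not part of this change), a test class can override the callbacks to run extra diagnostics:

public class MyDiagnosticsTests extends ElasticsearchTestCase {
    @Override
    protected void afterIfFailed(java.util.List<Throwable> errors) {
        // invoked only when the test failed; errors holds the collected failures
        logger.error("test failed with [{}] error(s)", errors.size());
    }

    @Override
    protected void afterIfSuccessful() {
        // invoked only when the test passed
    }
}

// setup mock filesystems for this test run.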
we change PathUtils // so that all accesses are plumbed thru any mock wrappers - + @BeforeClass public static void setFileSystem() throws Exception { Field field = PathUtils.class.getDeclaredField("DEFAULT"); @@ -137,7 +137,7 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { field.set(null, mock); assertEquals(mock, PathUtils.getDefaultFileSystem()); } - + @AfterClass public static void restoreFileSystem() throws Exception { Field field1 = PathUtils.class.getDeclaredField("ACTUAL_DEFAULT"); @@ -149,66 +149,46 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { // setup a default exception handler which knows when and how to print a stacktrace private static Thread.UncaughtExceptionHandler defaultHandler; - + @BeforeClass public static void setDefaultExceptionHandler() throws Exception { defaultHandler = Thread.getDefaultUncaughtExceptionHandler(); Thread.setDefaultUncaughtExceptionHandler(new ElasticsearchUncaughtExceptionHandler(defaultHandler)); } - + @AfterClass public static void restoreDefaultExceptionHandler() throws Exception { Thread.setDefaultUncaughtExceptionHandler(defaultHandler); } // randomize content type for request builders - + @BeforeClass public static void setContentType() throws Exception { Requests.CONTENT_TYPE = randomFrom(XContentType.values()); Requests.INDEX_CONTENT_TYPE = randomFrom(XContentType.values()); } - + @AfterClass public static void restoreContentType() { Requests.CONTENT_TYPE = XContentType.SMILE; Requests.INDEX_CONTENT_TYPE = XContentType.JSON; } - + // randomize and override the number of cpus so tests reproduce regardless of real number of cpus - + @BeforeClass public static void setProcessors() { int numCpu = TestUtil.nextInt(random(), 1, 4); System.setProperty(EsExecutors.DEFAULT_SYSPROP, Integer.toString(numCpu)); assertEquals(numCpu, EsExecutors.boundedNumberOfProcessors(ImmutableSettings.EMPTY)); } - + @AfterClass public static void restoreProcessors() { System.clearProperty(EsExecutors.DEFAULT_SYSPROP); } - // check some things (like MockDirectoryWrappers) are closed where we currently - // manage them. TODO: can we add these to LuceneTestCase.closeAfterSuite directly? - // or something else simpler instead of the fake closeables? - - @BeforeClass - public static void setAfterSuiteAssertions() throws Exception { - closeAfterSuite(new Closeable() { - @Override - public void close() throws IOException { - assertAllFilesClosed(); - } - }); - closeAfterSuite(new Closeable() { - @Override - public void close() throws IOException { - assertAllSearchersClosed(); - } - }); - } - @After public final void ensureCleanedUp() throws Exception { MockPageCacheRecycler.ensureAllPagesAreReleased(); @@ -228,18 +208,18 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { } }); } - + // mockdirectorywrappers currently set this boolean if checkindex fails // TODO: can we do this cleaner??? - + /** MockFSDirectoryService sets this: */ public static boolean checkIndexFailed; - + @Before public final void resetCheckIndexStatus() throws Exception { checkIndexFailed = false; } - + @After public final void ensureCheckIndexPassed() throws Exception { assertFalse("at least one shard failed CheckIndex", checkIndexFailed); @@ -248,7 +228,7 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { // ----------------------------------------------------------------- // Test facilities and facades for subclasses. 
// ----------------------------------------------------------------- - + // TODO: replaces uses of getRandom() with random() // TODO: decide on one set of naming for between/scaledBetween and remove others // TODO: replace frequently() with usually() @@ -258,114 +238,133 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { // TODO: replace uses of this function with random() return random(); } - + /** * Returns a "scaled" random number between min and max (inclusive). + * * @see RandomizedTest#scaledRandomIntBetween(int, int); */ public static int scaledRandomIntBetween(int min, int max) { return RandomizedTest.scaledRandomIntBetween(min, max); } - - /** + + /** * A random integer from min to max (inclusive). + * * @see #scaledRandomIntBetween(int, int) */ public static int randomIntBetween(int min, int max) { - return RandomInts.randomIntBetween(random(), min, max); + return RandomInts.randomIntBetween(random(), min, max); } - + /** * Returns a "scaled" number of iterations for loops which can have a variable - * iteration count. This method is effectively + * iteration count. This method is effectively * an alias to {@link #scaledRandomIntBetween(int, int)}. */ public static int iterations(int min, int max) { return scaledRandomIntBetween(min, max); } - - /** - * An alias for {@link #randomIntBetween(int, int)}. - * + + /** + * An alias for {@link #randomIntBetween(int, int)}. + * * @see #scaledRandomIntBetween(int, int) */ public static int between(int min, int max) { - return randomIntBetween(min, max); + return randomIntBetween(min, max); } - + /** * The exact opposite of {@link #rarely()}. */ public static boolean frequently() { - return !rarely(); + return !rarely(); } - + public static boolean randomBoolean() { return random().nextBoolean(); } - - public static byte randomByte() { return (byte) random().nextInt(); } - public static short randomShort() { return (short) random().nextInt(); } - public static int randomInt() { return random().nextInt(); } - public static float randomFloat() { return random().nextFloat(); } - public static double randomDouble() { return random().nextDouble(); } - public static long randomLong() { return random().nextLong(); } + + public static byte randomByte() { + return (byte) random().nextInt(); + } + + public static short randomShort() { + return (short) random().nextInt(); + } + + public static int randomInt() { + return random().nextInt(); + } + + public static float randomFloat() { + return random().nextFloat(); + } + + public static double randomDouble() { + return random().nextDouble(); + } + + public static long randomLong() { + return random().nextLong(); + } /** A random integer from 0..max (inclusive). */ public static int randomInt(int max) { return RandomizedTest.randomInt(max); } - + /** Pick a random object from the given array. The array must not be empty. */ public static T randomFrom(T... array) { - return RandomPicks.randomFrom(random(), array); + return RandomPicks.randomFrom(random(), array); } /** Pick a random object from the given list. 
*/ public static T randomFrom(List list) { - return RandomPicks.randomFrom(random(), list); + return RandomPicks.randomFrom(random(), list); } - + public static String randomAsciiOfLengthBetween(int minCodeUnits, int maxCodeUnits) { - return RandomizedTest.randomAsciiOfLengthBetween(minCodeUnits, maxCodeUnits); + return RandomizedTest.randomAsciiOfLengthBetween(minCodeUnits, maxCodeUnits); } - + public static String randomAsciiOfLength(int codeUnits) { - return RandomizedTest.randomAsciiOfLength(codeUnits); + return RandomizedTest.randomAsciiOfLength(codeUnits); } - + public static String randomUnicodeOfLengthBetween(int minCodeUnits, int maxCodeUnits) { - return RandomizedTest.randomUnicodeOfLengthBetween(minCodeUnits, maxCodeUnits); + return RandomizedTest.randomUnicodeOfLengthBetween(minCodeUnits, maxCodeUnits); } - + public static String randomUnicodeOfLength(int codeUnits) { - return RandomizedTest.randomUnicodeOfLength(codeUnits); + return RandomizedTest.randomUnicodeOfLength(codeUnits); } public static String randomUnicodeOfCodepointLengthBetween(int minCodePoints, int maxCodePoints) { - return RandomizedTest.randomUnicodeOfCodepointLengthBetween(minCodePoints, maxCodePoints); + return RandomizedTest.randomUnicodeOfCodepointLengthBetween(minCodePoints, maxCodePoints); } - + public static String randomUnicodeOfCodepointLength(int codePoints) { - return RandomizedTest.randomUnicodeOfCodepointLength(codePoints); + return RandomizedTest.randomUnicodeOfCodepointLength(codePoints); } public static String randomRealisticUnicodeOfLengthBetween(int minCodeUnits, int maxCodeUnits) { - return RandomizedTest.randomRealisticUnicodeOfLengthBetween(minCodeUnits, maxCodeUnits); + return RandomizedTest.randomRealisticUnicodeOfLengthBetween(minCodeUnits, maxCodeUnits); } - + public static String randomRealisticUnicodeOfLength(int codeUnits) { - return RandomizedTest.randomRealisticUnicodeOfLength(codeUnits); + return RandomizedTest.randomRealisticUnicodeOfLength(codeUnits); } public static String randomRealisticUnicodeOfCodepointLengthBetween(int minCodePoints, int maxCodePoints) { - return RandomizedTest.randomRealisticUnicodeOfCodepointLengthBetween(minCodePoints, maxCodePoints); + return RandomizedTest.randomRealisticUnicodeOfCodepointLengthBetween(minCodePoints, maxCodePoints); } - + public static String randomRealisticUnicodeOfCodepointLength(int codePoints) { - return RandomizedTest.randomRealisticUnicodeOfCodepointLength(codePoints); + return RandomizedTest.randomRealisticUnicodeOfCodepointLength(codePoints); } - + public static String[] generateRandomStringArray(int maxArraySize, int maxStringSize, boolean allowNull) { if (allowNull && random().nextBoolean()) { return null; @@ -425,7 +424,7 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { throw e; } } - + public static boolean awaitBusy(Predicate breakPredicate) throws InterruptedException { return awaitBusy(breakPredicate, 10, TimeUnit.SECONDS); } @@ -497,9 +496,9 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { public NodeEnvironment newNodeEnvironment(Settings settings) throws IOException { Settings build = ImmutableSettings.builder() - .put(settings) - .put("path.home", createTempDir().toAbsolutePath()) - .putArray("path.data", tmpPaths()).build(); + .put(settings) + .put("path.home", createTempDir().toAbsolutePath()) + .putArray("path.data", tmpPaths()).build(); return new NodeEnvironment(build, new Environment(build)); } @@ -524,7 +523,7 @@ public abstract class ElasticsearchTestCase extends 
LuceneTestCase { private ElasticsearchUncaughtExceptionHandler(Thread.UncaughtExceptionHandler parent) { this.parent = parent; } - + @Override public void uncaughtException(Thread t, Throwable e) { if (e instanceof EsRejectedExecutionException) { @@ -552,8 +551,9 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { int cnt = 1; final Formatter f = new Formatter(message, Locale.ENGLISH); for (Map.Entry<Thread, StackTraceElement[]> e : threads.entrySet()) { - if (e.getKey().isAlive()) + if (e.getKey().isAlive()) { f.format(Locale.ENGLISH, "\n %2d) %s", cnt++, threadName(e.getKey())).flush(); + } if (e.getValue().length == 0) { message.append("\n at (empty stack)"); } else { @@ -581,4 +581,17 @@ public abstract class ElasticsearchTestCase extends LuceneTestCase { return threadGroup.getName(); } } + + /** + * Returns {@code size} random values + */ + public static <T> List<T> randomSubsetOf(int size, T... values) { + if (size > values.length) { + throw new IllegalArgumentException("Can't pick " + size + " random objects from a list of " + values.length + " objects"); + } + List<T> list = newArrayList(values); + Collections.shuffle(list); + return list.subList(0, size); + } + } diff --git a/src/test/java/org/elasticsearch/test/ExternalNode.java b/src/test/java/org/elasticsearch/test/ExternalNode.java index 705f07d3e2a..f304b71cd4b 100644 --- a/src/test/java/org/elasticsearch/test/ExternalNode.java +++ b/src/test/java/org/elasticsearch/test/ExternalNode.java @@ -212,16 +212,8 @@ final class ExternalNode implements Closeable { } synchronized void stop() { - stop(false); - } - - synchronized void stop(boolean forceKill) { if (running()) { try { - if (forceKill == false && nodeInfo != null && random.nextBoolean()) { - // sometimes shut down gracefully - getClient().admin().cluster().prepareNodesShutdown(this.nodeInfo.getNode().id()).setExit(random.nextBoolean()).setDelay("0s").get(); - } if (this.client != null) { client.close(); } diff --git a/src/test/java/org/elasticsearch/test/InternalTestCluster.java b/src/test/java/org/elasticsearch/test/InternalTestCluster.java index b685448881b..4c857c24027 100644 --- a/src/test/java/org/elasticsearch/test/InternalTestCluster.java +++ b/src/test/java/org/elasticsearch/test/InternalTestCluster.java @@ -38,7 +38,6 @@ import com.google.common.util.concurrent.SettableFuture; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; @@ -79,10 +78,11 @@ import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.cache.filter.FilterCacheModule; import org.elasticsearch.index.cache.filter.FilterCacheModule.FilterCacheSettings; +import org.elasticsearch.index.cache.filter.index.IndexFilterCache; import org.elasticsearch.index.cache.filter.none.NoneFilterCache; -import org.elasticsearch.index.cache.filter.weighted.WeightedFilterCache; import org.elasticsearch.index.shard.IndexShardModule; import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.IndexStoreModule; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService; @@ -131,7 +131,6 @@ import java.util.concurrent.atomic.AtomicInteger; import static
junit.framework.Assert.fail; import static org.apache.lucene.util.LuceneTestCase.TEST_NIGHTLY; -import static org.apache.lucene.util.LuceneTestCase.random; import static org.apache.lucene.util.LuceneTestCase.rarely; import static org.apache.lucene.util.LuceneTestCase.usually; import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder; @@ -180,8 +179,8 @@ public final class InternalTestCluster extends TestCluster { private static final boolean ENABLE_MOCK_MODULES = RandomizedTest.systemPropertyAsBoolean(TESTS_ENABLE_MOCK_MODULES, true); - static final int DEFAULT_MIN_NUM_DATA_NODES = 2; - static final int DEFAULT_MAX_NUM_DATA_NODES = 6; + static final int DEFAULT_MIN_NUM_DATA_NODES = 1; + static final int DEFAULT_MAX_NUM_DATA_NODES = TEST_NIGHTLY ? 6 : 3; static final int DEFAULT_NUM_CLIENT_NODES = -1; static final int DEFAULT_MIN_NUM_CLIENT_NODES = 0; @@ -236,7 +235,6 @@ public final class InternalTestCluster extends TestCluster { super(clusterSeed); this.baseDir = baseDir; this.clusterName = clusterName; - if (minNumDataNodes < 0 || maxNumDataNodes < 0) { throw new IllegalArgumentException("minimum and maximum number of data nodes must be >= 0"); } @@ -365,7 +363,7 @@ public final class InternalTestCluster extends TestCluster { Settings settings = settingsSource.node(nodeOrdinal); if (settings != null) { if (settings.get(ClusterName.SETTING) != null) { - throw new ElasticsearchIllegalStateException("Tests must not set a '" + ClusterName.SETTING + "' as a node setting set '" + ClusterName.SETTING + "': [" + settings.get(ClusterName.SETTING) + "]"); + throw new IllegalStateException("Tests must not set a '" + ClusterName.SETTING + "' as a node setting set '" + ClusterName.SETTING + "': [" + settings.get(ClusterName.SETTING) + "]"); } builder.put(settings); } @@ -383,7 +381,7 @@ public final class InternalTestCluster extends TestCluster { .put("cluster.routing.schedule", (30 + random.nextInt(50)) + "ms") .put(SETTING_CLUSTER_NODE_SEED, seed); if (ENABLE_MOCK_MODULES && usually(random)) { - builder.put("index.store.type", MockFSIndexStoreModule.class.getName()); // no RAM dir for now! + builder.put(IndexStoreModule.STORE_TYPE, MockFSIndexStoreModule.class.getName()); // no RAM dir for now! builder.put(IndexShardModule.ENGINE_FACTORY, MockEngineFactory.class); builder.put(PageCacheRecyclerModule.CACHE_IMPL, MockPageCacheRecyclerModule.class.getName()); builder.put(BigArraysModule.IMPL, MockBigArraysModule.class.getName()); @@ -451,7 +449,7 @@ public final class InternalTestCluster extends TestCluster { } if (random.nextBoolean()) { - builder.put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, random.nextBoolean() ? WeightedFilterCache.class : NoneFilterCache.class); + builder.put(FilterCacheModule.FilterCacheSettings.FILTER_CACHE_TYPE, random.nextBoolean() ? IndexFilterCache.class : NoneFilterCache.class); } if (random.nextBoolean()) { diff --git a/src/test/java/org/elasticsearch/test/MockLogAppender.java b/src/test/java/org/elasticsearch/test/MockLogAppender.java new file mode 100644 index 00000000000..2e0c293c1de --- /dev/null +++ b/src/test/java/org/elasticsearch/test/MockLogAppender.java @@ -0,0 +1,139 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.test; + +import org.apache.log4j.AppenderSkeleton; +import org.apache.log4j.Level; +import org.apache.log4j.spi.LoggingEvent; +import org.elasticsearch.common.regex.Regex; + +import java.util.List; + +import static com.google.common.collect.Lists.newArrayList; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.MatcherAssert.assertThat; + +/** + * Test appender that can be used to verify that certain events were logged correctly + */ +public class MockLogAppender extends AppenderSkeleton { + + private final static String COMMON_PREFIX = System.getProperty("es.logger.prefix", "org.elasticsearch."); + + private List<LoggingExpectation> expectations; + + public MockLogAppender() { + expectations = newArrayList(); + } + + public void addExpectation(LoggingExpectation expectation) { + expectations.add(expectation); + } + + @Override + protected void append(LoggingEvent loggingEvent) { + for (LoggingExpectation expectation : expectations) { + expectation.match(loggingEvent); + } + } + + @Override + public void close() { + + } + + @Override + public boolean requiresLayout() { + return false; + } + + public void assertAllExpectationsMatched() { + for (LoggingExpectation expectation : expectations) { + expectation.assertMatched(); + } + } + + public interface LoggingExpectation { + void match(LoggingEvent loggingEvent); + + void assertMatched(); + } + + public static abstract class AbstractEventExpectation implements LoggingExpectation { + protected final String name; + protected final String logger; + protected final Level level; + protected final String message; + protected boolean saw; + + public AbstractEventExpectation(String name, String logger, Level level, String message) { + this.name = name; + this.logger = getLoggerName(logger); + this.level = level; + this.message = message; + this.saw = false; + } + + @Override + public void match(LoggingEvent event) { + if (event.getLevel() == level && event.getLoggerName().equals(logger)) { + if (Regex.isSimpleMatchPattern(message)) { + if (Regex.simpleMatch(message, event.getMessage().toString())) { + saw = true; + } + } else { + if (event.getMessage().toString().contains(message)) { + saw = true; + } + } + } + } + } + + public static class UnseenEventExpectation extends AbstractEventExpectation { + + public UnseenEventExpectation(String name, String logger, Level level, String message) { + super(name, logger, level, message); + } + + @Override + public void assertMatched() { + assertThat(name, saw, equalTo(false)); + } + } + + public static class SeenEventExpectation extends AbstractEventExpectation { + + public SeenEventExpectation(String name, String logger, Level level, String message) { + super(name, logger, level, message); + } + + @Override + public void assertMatched() { + assertThat(name, saw, equalTo(true)); + } + }
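A usage sketch (the logger name, level, and message pattern are illustrative): register an expectation, attach the appender to a log4j logger, exercise the code under test, then verify.

MockLogAppender appender = new MockLogAppender();
appender.addExpectation(new MockLogAppender.SeenEventExpectation(
        "recovery warning seen", "indices.recovery", Level.WARN, "*failed to recover*"));
org.apache.log4j.Logger.getRootLogger().addAppender(appender);
try {
    // ... run the code that is expected to log the event ...
    appender.assertAllExpectationsMatched();
} finally {
    org.apache.log4j.Logger.getRootLogger().removeAppender(appender);
}

Note that expectations pass the logger name through getLoggerName(), so "indices.recovery" above matches events logged under "org.elasticsearch.indices.recovery":

+ private static String getLoggerName(String name) { + if (name.startsWith("org.elasticsearch.")) {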
+ name = name.substring("org.elasticsearch.".length()); + } + return COMMON_PREFIX + name; + } +} diff --git a/src/test/java/org/elasticsearch/test/TestCluster.java b/src/test/java/org/elasticsearch/test/TestCluster.java index adfa32ac1c6..c8d48521b14 100644 --- a/src/test/java/org/elasticsearch/test/TestCluster.java +++ b/src/test/java/org/elasticsearch/test/TestCluster.java @@ -20,7 +20,6 @@ package org.elasticsearch.test; import com.carrotsearch.hppc.ObjectArrayList; -import org.elasticsearch.ElasticsearchIllegalArgumentException; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -81,8 +80,6 @@ public abstract class TestCluster implements Iterable, Closeable { * This method checks all the things that need to be checked after each test */ public void assertAfterTest() throws IOException { - assertAllSearchersClosed(); - assertAllFilesClosed(); ensureEstimatedStats(); } @@ -134,7 +131,7 @@ public abstract class TestCluster implements Iterable, Closeable { assertAcked(client().admin().indices().prepareDelete(indices)); } catch (IndexMissingException e) { // ignore - } catch (ElasticsearchIllegalArgumentException e) { + } catch (IllegalArgumentException e) { // Happens if `action.destructive_requires_name` is set to true // which is the case in the CloseIndexDisableCloseAllTests if ("_all".equals(indices[0])) { diff --git a/src/test/java/org/elasticsearch/test/TestSearchContext.java b/src/test/java/org/elasticsearch/test/TestSearchContext.java index 77448544b1c..bcfa48a5813 100644 --- a/src/test/java/org/elasticsearch/test/TestSearchContext.java +++ b/src/test/java/org/elasticsearch/test/TestSearchContext.java @@ -38,7 +38,6 @@ import org.elasticsearch.index.query.IndexQueryParserService; import org.elasticsearch.index.query.ParsedFilter; import org.elasticsearch.index.query.ParsedQuery; import org.elasticsearch.index.IndexService; -import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.similarity.SimilarityService; import org.elasticsearch.script.ScriptService; @@ -69,7 +68,6 @@ public class TestSearchContext extends SearchContext { final PageCacheRecycler pageCacheRecycler; final BigArrays bigArrays; final IndexService indexService; - final FilterCache filterCache; final IndexFieldDataService indexFieldDataService; final BitsetFilterCache fixedBitSetFilterCache; final ThreadPool threadPool; @@ -84,7 +82,6 @@ public class TestSearchContext extends SearchContext { this.pageCacheRecycler = pageCacheRecycler; this.bigArrays = bigArrays.withCircuitBreaking(); this.indexService = indexService; - this.filterCache = indexService.cache().filter(); this.indexFieldDataService = indexService.fieldData(); this.fixedBitSetFilterCache = indexService.bitsetFilterCache(); this.threadPool = threadPool; @@ -94,7 +91,6 @@ public class TestSearchContext extends SearchContext { this.pageCacheRecycler = null; this.bigArrays = null; this.indexService = null; - this.filterCache = null; this.indexFieldDataService = null; this.threadPool = null; this.fixedBitSetFilterCache = null; @@ -313,11 +309,6 @@ public class TestSearchContext extends SearchContext { return bigArrays; } - @Override - public FilterCache filterCache() { - return filterCache; - } - @Override public BitsetFilterCache bitsetFilterCache() { return fixedBitSetFilterCache; @@ -590,7 +581,7 @@ public class TestSearchContext extends SearchContext { } 
@Override - public void doClose() throws ElasticsearchException { + public void doClose() { } @Override diff --git a/src/test/java/org/elasticsearch/test/XContentTestUtils.java b/src/test/java/org/elasticsearch/test/XContentTestUtils.java new file mode 100644 index 00000000000..1f1b8eff710 --- /dev/null +++ b/src/test/java/org/elasticsearch/test/XContentTestUtils.java @@ -0,0 +1,104 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test; + +import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Lists; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS; + +public final class XContentTestUtils { + private XContentTestUtils() { + + } + + public static Map<String, Object> convertToMap(ToXContent part) throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + part.toXContent(builder, EMPTY_PARAMS); + builder.endObject(); + return XContentHelper.convertToMap(builder.bytes(), false).v2(); + } + +
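As a sketch of what these helpers enable (the JSON content is made up, and the BytesArray wrapper assumes org.elasticsearch.common.bytes.BytesArray): two structures that differ only in array ordering compare as equal.

Map<String, Object> first = XContentHelper.convertToMap(new BytesArray("{\"ids\": [1, 2]}"), false).v2();
Map<String, Object> second = XContentHelper.convertToMap(new BytesArray("{\"ids\": [2, 1]}"), false).v2();
assertThat(XContentTestUtils.mapsEqualIgnoringArrayOrder(first, second), equalTo(true));

+ /** + * Compares two maps generated from XContentObjects.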
The order of elements in arrays is ignored + */ + public static boolean mapsEqualIgnoringArrayOrder(Map first, Map second) { + if (first.size() != second.size()) { + return false; + } + + for (String key : first.keySet()) { + if (objectsEqualIgnoringArrayOrder(first.get(key), second.get(key)) == false) { + return false; + } + } + return true; + } + + @SuppressWarnings("unchecked") + private static boolean objectsEqualIgnoringArrayOrder(Object first, Object second) { + if (first == null ) { + return second == null; + } else if (first instanceof List) { + if (second instanceof List) { + List secondList = Lists.newArrayList((List) second); + List firstList = (List) first; + if (firstList.size() == secondList.size()) { + for (Object firstObj : firstList) { + boolean found = false; + for (Object secondObj : secondList) { + if (objectsEqualIgnoringArrayOrder(firstObj, secondObj)) { + secondList.remove(secondObj); + found = true; + break; + } + } + if (found == false) { + return false; + } + } + return secondList.isEmpty(); + } else { + return false; + } + } else { + return false; + } + } else if (first instanceof Map) { + if (second instanceof Map) { + return mapsEqualIgnoringArrayOrder((Map) first, (Map) second); + } else { + return false; + } + } else { + return first.equals(second); + } + } + +} diff --git a/src/test/java/org/elasticsearch/test/cache/recycler/MockBigArrays.java b/src/test/java/org/elasticsearch/test/cache/recycler/MockBigArrays.java index 557792fc3cf..08f86a7004e 100644 --- a/src/test/java/org/elasticsearch/test/cache/recycler/MockBigArrays.java +++ b/src/test/java/org/elasticsearch/test/cache/recycler/MockBigArrays.java @@ -30,7 +30,6 @@ import org.apache.lucene.util.Accountables; import org.apache.lucene.util.BytesRef; import org.elasticsearch.cache.recycler.PageCacheRecycler; import org.elasticsearch.common.inject.Inject; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArray; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.ByteArray; @@ -87,18 +86,16 @@ public class MockBigArrays extends BigArrays { } private final Random random; - private final Settings settings; private final PageCacheRecycler recycler; private final CircuitBreakerService breakerService; @Inject - public MockBigArrays(Settings settings, PageCacheRecycler recycler, CircuitBreakerService breakerService) { - this(settings, recycler, breakerService, false); + public MockBigArrays(PageCacheRecycler recycler, CircuitBreakerService breakerService) { + this(recycler, breakerService, false); } - public MockBigArrays(Settings settings, PageCacheRecycler recycler, CircuitBreakerService breakerService, boolean checkBreaker) { - super(settings, recycler, breakerService, checkBreaker); - this.settings = settings; + public MockBigArrays(PageCacheRecycler recycler, CircuitBreakerService breakerService, boolean checkBreaker) { + super(recycler, breakerService, checkBreaker); this.recycler = recycler; this.breakerService = breakerService; long seed; @@ -114,7 +111,7 @@ public class MockBigArrays extends BigArrays { @Override public BigArrays withCircuitBreaking() { - return new MockBigArrays(this.settings, this.recycler, this.breakerService, true); + return new MockBigArrays(this.recycler, this.breakerService, true); } @Override diff --git a/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecycler.java b/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecycler.java index b7c2d07d4d2..6613a786449 100644 --- 
a/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecycler.java +++ b/src/test/java/org/elasticsearch/test/cache/recycler/MockPageCacheRecycler.java @@ -78,7 +78,7 @@ public class MockPageCacheRecycler extends PageCacheRecycler { return new V() { @Override - public void close() throws ElasticsearchException { + public void close() { final Throwable t = ACQUIRED_PAGES.remove(v); if (t == null) { throw new IllegalStateException("Releasing a page that has not been acquired"); diff --git a/src/test/java/org/elasticsearch/test/cluster/NoopClusterService.java b/src/test/java/org/elasticsearch/test/cluster/NoopClusterService.java index 69b3cee96d4..18f712e7259 100644 --- a/src/test/java/org/elasticsearch/test/cluster/NoopClusterService.java +++ b/src/test/java/org/elasticsearch/test/cluster/NoopClusterService.java @@ -19,7 +19,6 @@ package org.elasticsearch.test.cluster; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.Version; import org.elasticsearch.cluster.*; import org.elasticsearch.cluster.block.ClusterBlock; @@ -67,12 +66,12 @@ public class NoopClusterService implements ClusterService { } @Override - public void addInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException { + public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException { } @Override - public void removeInitialStateBlock(ClusterBlock block) throws ElasticsearchIllegalStateException { + public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException { } @@ -152,17 +151,17 @@ public class NoopClusterService implements ClusterService { } @Override - public ClusterService start() throws ElasticsearchException { + public ClusterService start() { return null; } @Override - public ClusterService stop() throws ElasticsearchException { + public ClusterService stop() { return null; } @Override - public void close() throws ElasticsearchException { + public void close() { } -} \ No newline at end of file +} diff --git a/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java b/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java new file mode 100644 index 00000000000..6a61665e355 --- /dev/null +++ b/src/test/java/org/elasticsearch/test/cluster/TestClusterService.java @@ -0,0 +1,248 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.test.cluster; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.*; +import org.elasticsearch.cluster.block.ClusterBlock; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.OperationRouting; +import org.elasticsearch.cluster.service.PendingClusterTask; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Priority; +import org.elasticsearch.common.component.Lifecycle; +import org.elasticsearch.common.component.LifecycleListener; +import org.elasticsearch.common.transport.DummyTransportAddress; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.FutureUtils; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Queue; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.ScheduledFuture; + +/** a class that simulates simple cluster service features, like state storage and listeners */ +public class TestClusterService implements ClusterService { + + volatile ClusterState state; + private final Collection<ClusterStateListener> listeners = new CopyOnWriteArrayList<>(); + private final Queue<NotifyTimeout> onGoingTimeouts = ConcurrentCollections.newQueue(); + private final ThreadPool threadPool; + + public TestClusterService() { + this(ClusterState.builder(new ClusterName("test")).build()); + } + + public TestClusterService(ThreadPool threadPool) { + this(ClusterState.builder(new ClusterName("test")).build(), threadPool); + } + + public TestClusterService(ClusterState state) { + this(state, null); + } + + public TestClusterService(ClusterState state, @Nullable ThreadPool threadPool) { + if (state.getNodes().size() == 0) { + state = ClusterState.builder(state).nodes( + DiscoveryNodes.builder() + .put(new DiscoveryNode("test_id", DummyTransportAddress.INSTANCE, Version.CURRENT)) + .localNodeId("test_id")).build(); + } + + assert state.getNodes().localNode() != null; + this.state = state; + this.threadPool = threadPool; + + } + + + /** set the current state and trigger any registered listeners about the change */ + public void setState(ClusterState state) { + assert state.getNodes().localNode() != null; + // make sure we have a version increment + state = ClusterState.builder(state).version(this.state.version() + 1).build(); + ClusterChangedEvent event = new ClusterChangedEvent("test", state, this.state); + this.state = state; + for (ClusterStateListener listener : listeners) { + listener.clusterChanged(event); + } + } + + /** set the current state and trigger any registered listeners about the change */ + public void setState(ClusterState.Builder state) { + setState(state.build()); + } + + @Override + public DiscoveryNode localNode() { + return state.getNodes().localNode(); + } + + @Override + public ClusterState state() { + return state; + } + + @Override + public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException { + throw new UnsupportedOperationException(); + + } + + @Override + public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException { + throw new UnsupportedOperationException(); + + } + + @Override + public OperationRouting operationRouting() { + return null; + }
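A usage sketch (the listener body is illustrative): tests can drive cluster-state listeners deterministically, without a real master, by swapping in a new state.

TestClusterService clusterService = new TestClusterService();
clusterService.add(new ClusterStateListener() {
    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        // invoked synchronously from setState(), with the version already incremented
    }
});
clusterService.setState(ClusterState.builder(clusterService.state()));

+ @Override + public void addFirst(ClusterStateListener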
listener) { + throw new UnsupportedOperationException(); + } + + @Override + public void addLast(ClusterStateListener listener) { + listeners.add(listener); + } + + @Override + public void add(ClusterStateListener listener) { + listeners.add(listener); + } + + @Override + public void remove(ClusterStateListener listener) { + listeners.remove(listener); + for (Iterator it = onGoingTimeouts.iterator(); it.hasNext(); ) { + NotifyTimeout timeout = it.next(); + if (timeout.listener.equals(listener)) { + timeout.cancel(); + it.remove(); + } + } + } + + @Override + public void add(LocalNodeMasterListener listener) { + throw new UnsupportedOperationException(); + } + + @Override + public void remove(LocalNodeMasterListener listener) { + throw new UnsupportedOperationException(); + } + + @Override + public void add(final TimeValue timeout, final TimeoutClusterStateListener listener) { + if (threadPool == null) { + throw new UnsupportedOperationException("TestClusterService wasn't initialized with a thread pool"); + } + NotifyTimeout notifyTimeout = new NotifyTimeout(listener, timeout); + notifyTimeout.future = threadPool.schedule(timeout, ThreadPool.Names.GENERIC, notifyTimeout); + onGoingTimeouts.add(notifyTimeout); + listeners.add(listener); + listener.postAdded(); + } + + @Override + public void submitStateUpdateTask(String source, Priority priority, ClusterStateUpdateTask updateTask) { + throw new UnsupportedOperationException(); + } + + @Override + public void submitStateUpdateTask(String source, ClusterStateUpdateTask updateTask) { + throw new UnsupportedOperationException(); + } + + @Override + public List pendingTasks() { + throw new UnsupportedOperationException(); + + } + + @Override + public int numberOfPendingTasks() { + throw new UnsupportedOperationException(); + } + + @Override + public Lifecycle.State lifecycleState() { + throw new UnsupportedOperationException(); + } + + @Override + public void addLifecycleListener(LifecycleListener listener) { + throw new UnsupportedOperationException(); + } + + @Override + public void removeLifecycleListener(LifecycleListener listener) { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterService start() { + throw new UnsupportedOperationException(); + } + + @Override + public ClusterService stop() { + throw new UnsupportedOperationException(); + } + + @Override + public void close() { + throw new UnsupportedOperationException(); + } + + class NotifyTimeout implements Runnable { + final TimeoutClusterStateListener listener; + final TimeValue timeout; + volatile ScheduledFuture future; + + NotifyTimeout(TimeoutClusterStateListener listener, TimeValue timeout) { + this.listener = listener; + this.timeout = timeout; + } + + public void cancel() { + FutureUtils.cancel(future); + } + + @Override + public void run() { + if (future != null && future.isCancelled()) { + return; + } + listener.onTimeout(this.timeout); + // note, we rely on the listener to remove itself in case of timeout if needed + } + } +} diff --git a/src/test/java/org/elasticsearch/test/engine/AssertingSearcher.java b/src/test/java/org/elasticsearch/test/engine/AssertingSearcher.java index b3d893c4362..fec406a7841 100644 --- a/src/test/java/org/elasticsearch/test/engine/AssertingSearcher.java +++ b/src/test/java/org/elasticsearch/test/engine/AssertingSearcher.java @@ -21,17 +21,16 @@ package org.elasticsearch.test.engine; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.IndexSearcher; -import 
org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.ShardId; -import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; /** * A searcher that asserts the IndexReader's refcount on close */ -public class AssertingSearcher extends Engine.Searcher { +class AssertingSearcher extends Engine.Searcher { private final Engine.Searcher wrappedSearcher; private final ShardId shardId; private final IndexSearcher indexSearcher; @@ -39,10 +38,10 @@ public class AssertingSearcher extends Engine.Searcher { private final Object lock = new Object(); private final int initialRefCount; private final ESLogger logger; - private final Map inFlightSearchers; + private final AtomicBoolean closed = new AtomicBoolean(false); - public AssertingSearcher(IndexSearcher indexSearcher, Engine.Searcher wrappedSearcher, - ShardId shardId, Map inFlightSearchers, + AssertingSearcher(IndexSearcher indexSearcher, final Engine.Searcher wrappedSearcher, + ShardId shardId, ESLogger logger) { super(wrappedSearcher.source(), indexSearcher); // we only use the given index searcher here instead of the IS of the wrapped searcher. the IS might be a wrapped searcher @@ -53,8 +52,6 @@ public class AssertingSearcher extends Engine.Searcher { initialRefCount = wrappedSearcher.reader().getRefCount(); this.indexSearcher = indexSearcher; assert initialRefCount > 0 : "IndexReader#getRefCount() was [" + initialRefCount + "] expected a value > [0] - reader is already closed"; - this.inFlightSearchers = inFlightSearchers; - this.inFlightSearchers.put(this, new RuntimeException("Unreleased Searcher, source [" + wrappedSearcher.source() + "]")); } @Override @@ -63,30 +60,26 @@ public class AssertingSearcher extends Engine.Searcher { } @Override - public void close() throws ElasticsearchException { - RuntimeException remove = inFlightSearchers.remove(this); + public void close() { synchronized (lock) { - // make sure we only get this once and store the stack of the first caller! - if (remove == null) { - assert firstReleaseStack != null; + if (closed.compareAndSet(false, true)) { + firstReleaseStack = new RuntimeException(); + final int refCount = wrappedSearcher.reader().getRefCount(); + // this assert seems to be paranoid but given LUCENE-5362 we better add some assertions here to make sure we catch any potential + // problems. + assert refCount > 0 : "IndexReader#getRefCount() was [" + refCount + "] expected a value > [0] - reader is already closed. Initial refCount was: [" + initialRefCount + "]"; + try { + wrappedSearcher.close(); + } catch (RuntimeException ex) { + logger.debug("Failed to release searcher", ex); + throw ex; + } + } else { AssertionError error = new AssertionError("Released Searcher more than once, source [" + wrappedSearcher.source() + "]"); error.initCause(firstReleaseStack); throw error; - } else { - assert firstReleaseStack == null; - firstReleaseStack = new RuntimeException("Searcher Released first here, source [" + wrappedSearcher.source() + "]"); } } - final int refCount = wrappedSearcher.reader().getRefCount(); - // this assert seems to be paranoid but given LUCENE-5362 we better add some assertions here to make sure we catch any potential - // problems. - assert refCount > 0 : "IndexReader#getRefCount() was [" + refCount + "] expected a value > [0] - reader is already closed. 
Initial refCount was: [" + initialRefCount + "]"; - try { - wrappedSearcher.close(); - } catch (RuntimeException ex) { - logger.debug("Failed to release searcher", ex); - throw ex; - } } @Override @@ -102,4 +95,8 @@ public class AssertingSearcher extends Engine.Searcher { public ShardId shardId() { return shardId; } + + public boolean isOpen() { + return closed.get() == false; + } } diff --git a/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java b/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java index 1c3581d2ae5..b321a0dfbb2 100644 --- a/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java +++ b/src/test/java/org/elasticsearch/test/engine/MockEngineSupport.java @@ -24,7 +24,10 @@ import org.apache.lucene.index.FilterDirectoryReader; import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.AssertingIndexSearcher; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.QueryCache; +import org.apache.lucene.search.QueryCachingPolicy; import org.apache.lucene.search.SearcherManager; +import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; @@ -32,15 +35,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineException; -import org.elasticsearch.index.engine.InternalEngine; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.test.ElasticsearchIntegrationTest; +import java.io.Closeable; import java.io.IOException; import java.lang.reflect.Constructor; -import java.util.Map; +import java.util.IdentityHashMap; import java.util.Random; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicBoolean; /** @@ -52,14 +54,20 @@ public final class MockEngineSupport { public static final String WRAP_READER_RATIO = "index.engine.mock.random.wrap_reader_ratio"; public static final String READER_WRAPPER_TYPE = "index.engine.mock.random.wrapper"; public static final String FLUSH_ON_CLOSE_RATIO = "index.engine.mock.flush_on_close.ratio"; + private final AtomicBoolean closing = new AtomicBoolean(false); private final ESLogger logger = Loggers.getLogger(Engine.class); + private final ShardId shardId; + private final QueryCache filterCache; + private final QueryCachingPolicy filterCachingPolicy; + private final SearcherCloseable searcherCloseable; + private final MockContext mockContext; public static class MockContext { - public final Random random; - public final boolean wrapReader; - public final Class wrapper; - public final Settings indexSettings; + private final Random random; + private final boolean wrapReader; + private final Class wrapper; + private final Settings indexSettings; private final double flushOnClose; public MockContext(Random random, boolean wrapReader, Class wrapper, Settings indexSettings) { @@ -71,21 +79,22 @@ public final class MockEngineSupport { } } - public static final ConcurrentMap INFLIGHT_ENGINE_SEARCHERS = new ConcurrentHashMap<>(); - - private final MockContext mockContext; - public MockEngineSupport(EngineConfig config) { Settings indexSettings = config.getIndexSettings(); + shardId = config.getShardId(); + filterCache = config.getFilterCache(); + filterCachingPolicy = config.getFilterCachingPolicy(); final long seed 
= indexSettings.getAsLong(ElasticsearchIntegrationTest.SETTING_INDEX_SEED, 0l); Random random = new Random(seed); final double ratio = indexSettings.getAsDouble(WRAP_READER_RATIO, 0.0d); // DISABLED by default - AssertingDR is crazy slow Class wrapper = indexSettings.getAsClass(READER_WRAPPER_TYPE, AssertingDirectoryReader.class); boolean wrapReader = random.nextDouble() < ratio; if (logger.isTraceEnabled()) { - logger.trace("Using [{}] for shard [{}] seed: [{}] wrapReader: [{}]", this.getClass().getName(), config.getShardId(), seed, wrapReader); + logger.trace("Using [{}] for shard [{}] seed: [{}] wrapReader: [{}]", this.getClass().getName(), shardId, seed, wrapReader); } mockContext = new MockContext(random, wrapReader, wrapper, indexSettings); + this.searcherCloseable = new SearcherCloseable(); + LuceneTestCase.closeAfterSuite(searcherCloseable); // only one suite closeable per Engine } enum CloseAction { @@ -99,41 +108,33 @@ public final class MockEngineSupport { * the first call and treats subsequent calls as if the engine passed is already closed. */ public CloseAction flushOrClose(Engine engine, CloseAction originalAction) throws IOException { - try { - if (closing.compareAndSet(false, true)) { // only do the random thing if we are the first call to this since super.flushOnClose() calls #close() again and then we might end up with a stackoverflow. - if (mockContext.flushOnClose > mockContext.random.nextDouble()) { - return CloseAction.FLUSH_AND_CLOSE; - } else { - return CloseAction.CLOSE; - } + if (closing.compareAndSet(false, true)) { // only do the random thing if we are the first call to this since super.flushOnClose() calls #close() again and then we might end up with a stackoverflow. + if (mockContext.flushOnClose > mockContext.random.nextDouble()) { + return CloseAction.FLUSH_AND_CLOSE; } else { - return originalAction; - } - } finally { - if (logger.isTraceEnabled()) { - // log debug if we have pending searchers - for (Map.Entry entry : INFLIGHT_ENGINE_SEARCHERS.entrySet()) { - logger.trace("Unreleased Searchers instance for shard [{}]", - entry.getValue(), entry.getKey().shardId()); - } + return CloseAction.CLOSE; } + } else { + return originalAction; } } - public AssertingIndexSearcher newSearcher(Engine engine, String source, IndexSearcher searcher, SearcherManager manager) throws EngineException { + public AssertingIndexSearcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) throws EngineException { IndexReader reader = searcher.getIndexReader(); IndexReader wrappedReader = reader; assert reader != null; if (reader instanceof DirectoryReader && mockContext.wrapReader) { - wrappedReader = wrapReader((DirectoryReader) reader, engine); + wrappedReader = wrapReader((DirectoryReader) reader); } // this executes basic query checks and asserts that weights are normalized only once etc. 
final AssertingIndexSearcher assertingIndexSearcher = new AssertingIndexSearcher(mockContext.random, wrappedReader); assertingIndexSearcher.setSimilarity(searcher.getSimilarity()); + assertingIndexSearcher.setQueryCache(filterCache); + assertingIndexSearcher.setQueryCachingPolicy(filterCachingPolicy); return assertingIndexSearcher; } - private DirectoryReader wrapReader(DirectoryReader reader, Engine engine) { + private DirectoryReader wrapReader(DirectoryReader reader) { try { Constructor[] constructors = mockContext.wrapper.getConstructors(); Constructor nonRandom = null; @@ -177,4 +178,50 @@ public final class MockEngineSupport { } + public Engine.Searcher wrapSearcher(String source, Engine.Searcher engineSearcher, IndexSearcher searcher, SearcherManager manager) { + final AssertingIndexSearcher assertingIndexSearcher = newSearcher(source, searcher, manager); + assertingIndexSearcher.setSimilarity(searcher.getSimilarity()); + // pass the original searcher to the super.newSearcher() method to make sure this is the searcher that will + // be released later on. If we wrap an index reader here must not pass the wrapped version to the manager + // on release otherwise the reader will be closed too early. - good news, stuff will fail all over the place if we don't get this right here + AssertingSearcher assertingSearcher = new AssertingSearcher(assertingIndexSearcher, engineSearcher, shardId, logger) { + @Override + public void close() { + try { + searcherCloseable.remove(this); + } finally { + super.close(); + } + } + }; + searcherCloseable.add(assertingSearcher, engineSearcher.source()); + return assertingSearcher; + } + + private static final class SearcherCloseable implements Closeable { + + private final IdentityHashMap openSearchers = new IdentityHashMap<>(); + + @Override + public synchronized void close() throws IOException { + if (openSearchers.isEmpty() == false) { + AssertionError error = new AssertionError("Unreleased searchers found"); + for (RuntimeException ex : openSearchers.values()) { + error.addSuppressed(ex); + } + throw error; + } + } + + void add(AssertingSearcher searcher, String source) { + final RuntimeException ex = new RuntimeException("Unreleased Searcher, source [" + source+ "]"); + synchronized (this) { + openSearchers.put(searcher, ex); + } + } + + synchronized void remove(AssertingSearcher searcher) { + openSearchers.remove(searcher); + } + } } diff --git a/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java b/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java index ed08a95e86f..e81009c4979 100644 --- a/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java +++ b/src/test/java/org/elasticsearch/test/engine/MockInternalEngine.java @@ -21,6 +21,7 @@ package org.elasticsearch.test.engine; import org.apache.lucene.search.AssertingIndexSearcher; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.SearcherManager; +import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.InternalEngine; @@ -71,12 +72,7 @@ final class MockInternalEngine extends InternalEngine { @Override protected Searcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) throws EngineException { - final AssertingIndexSearcher assertingIndexSearcher = support().newSearcher(this, source, searcher, manager); - assertingIndexSearcher.setSimilarity(searcher.getSimilarity()); - 
// pass the original searcher to the super.newSearcher() method to make sure this is the searcher that will - // be released later on. If we wrap an index reader here must not pass the wrapped version to the manager - // on release otherwise the reader will be closed too early. - good news, stuff will fail all over the place if we don't get this right here - return new AssertingSearcher(assertingIndexSearcher, - super.newSearcher(source, searcher, manager), shardId, MockEngineSupport.INFLIGHT_ENGINE_SEARCHERS, logger); + final Searcher engineSearcher = super.newSearcher(source, searcher, manager); + return support().wrapSearcher(source, engineSearcher, searcher, manager); } } diff --git a/src/test/java/org/elasticsearch/test/engine/MockShadowEngine.java b/src/test/java/org/elasticsearch/test/engine/MockShadowEngine.java index 198d4ba6639..1ed920b20fc 100644 --- a/src/test/java/org/elasticsearch/test/engine/MockShadowEngine.java +++ b/src/test/java/org/elasticsearch/test/engine/MockShadowEngine.java @@ -37,29 +37,10 @@ final class MockShadowEngine extends ShadowEngine { this.support = new MockEngineSupport(config); } - @Override - public void close() throws IOException { - try { - super.close(); - } finally { - if (logger.isTraceEnabled()) { - // log debug if we have pending searchers - for (Map.Entry entry : MockEngineSupport.INFLIGHT_ENGINE_SEARCHERS.entrySet()) { - logger.trace("Unreleased Searchers instance for shard [{}]", entry.getValue(), entry.getKey().shardId()); - } - } - } - } - @Override protected Searcher newSearcher(String source, IndexSearcher searcher, SearcherManager manager) throws EngineException { - final AssertingIndexSearcher assertingIndexSearcher = support.newSearcher(this, source, searcher, manager); - assertingIndexSearcher.setSimilarity(searcher.getSimilarity()); - // pass the original searcher to the super.newSearcher() method to make sure this is the searcher that will - // be released later on. If we wrap an index reader here must not pass the wrapped version to the manager - // on release otherwise the reader will be closed too early. 
- good news, stuff will fail all over the place if we don't get this right here - return new AssertingSearcher(assertingIndexSearcher, - super.newSearcher(source, searcher, manager), shardId, MockEngineSupport.INFLIGHT_ENGINE_SEARCHERS, logger); + final Searcher engineSearcher = super.newSearcher(source, searcher, manager); + return support.wrapSearcher(source, engineSearcher, searcher, manager); } } diff --git a/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 271a71466c2..7a822e163cb 100644 --- a/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/src/test/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -26,7 +26,6 @@ import com.google.common.collect.Iterables; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionFuture; @@ -54,6 +53,8 @@ import org.elasticsearch.action.search.ShardSearchFailure; import org.elasticsearch.action.support.broadcast.BroadcastOperationResponse; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.cluster.block.ClusterBlock; +import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.bytes.BytesReference; @@ -65,9 +66,6 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.engine.AssertingSearcher; -import org.elasticsearch.test.engine.MockEngineSupport; -import org.elasticsearch.test.store.MockDirectoryHelper; import org.hamcrest.Matcher; import org.hamcrest.Matchers; import org.junit.Assert; @@ -78,11 +76,11 @@ import java.lang.reflect.InvocationTargetException; import java.nio.file.Files; import java.nio.file.Path; import java.util.*; -import java.util.concurrent.TimeUnit; import static com.google.common.base.Predicates.isNull; import static org.elasticsearch.test.ElasticsearchTestCase.*; import static org.elasticsearch.test.VersionUtils.randomVersion; +import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.*; @@ -118,6 +116,42 @@ public class ElasticsearchAssertions { assertVersionSerializable(response); } + /** + * Executes the request and fails if the request has not been blocked. + * + * @param builder the request builder + */ + public static void assertBlocked(ActionRequestBuilder builder) { + assertBlocked(builder, null); + } + + /** + * Executes the request and fails if the request has not been blocked by a specific {@link ClusterBlock}. 
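+     * <p>
+     * Intended usage looks like the following sketch; the request builder and the
+     * expected block are illustrative only, not taken from a real test:
+     * <pre>
+     * assertBlocked(client().prepareIndex("test", "type").setSource("{}"),
+     *               IndexMetaData.INDEX_READ_ONLY_BLOCK);
+     * </pre>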
+     *
+     * @param builder the request builder
+     * @param expectedBlock the expected block
+     */
+    public static void assertBlocked(ActionRequestBuilder builder, ClusterBlock expectedBlock) {
+        try {
+            builder.get();
+            fail("Request executed successfully but a ClusterBlockException was expected");
+        } catch (ClusterBlockException e) {
+            assertThat(e.blocks().size(), greaterThan(0));
+            assertThat(e.status(), equalTo(RestStatus.FORBIDDEN));
+
+            if (expectedBlock != null) {
+                boolean found = false;
+                for (ClusterBlock clusterBlock : e.blocks()) {
+                    if (clusterBlock.id() == expectedBlock.id()) {
+                        found = true;
+                        break;
+                    }
+                }
+                assertThat("Request should have been blocked by [" + expectedBlock + "] instead of " + e.blocks(), found, equalTo(true));
+            }
+        }
+    }
+
     public static String formatShardStatus(BroadcastOperationResponse response) {
         String msg = " Total shards: " + response.getTotalShards() + " Successful shards: " + response.getSuccessfulShards() + " & "
                 + response.getFailedShards() + " shard failures:";
@@ -271,7 +305,7 @@ public class ElasticsearchAssertions {
             assertVersionSerializable(searchResponse);
         } catch (SearchPhaseExecutionException e) {
             assertThat(e.status(), equalTo(restStatus));
-            assertThat(e.getMessage(), reasonMatcher);
+            assertThat(e.toString(), reasonMatcher);
             for (ShardSearchFailure shardSearchFailure : e.shardFailures()) {
                 assertThat(shardSearchFailure.status(), equalTo(restStatus));
                 assertThat(shardSearchFailure.reason(), reasonMatcher);
@@ -644,67 +678,6 @@ public class ElasticsearchAssertions {
         return response;
     }

-    public static void assertAllSearchersClosed() {
-        /* in some cases we finish a test faster than the freeContext calls make it to the
-         * shards. Let's wait for some time if there are still searchers. If the are really
-         * pending we will fail anyway.*/
-        try {
-            if (awaitBusy(new Predicate<Object>() {
-                @Override
-                public boolean apply(Object o) {
-                    return MockEngineSupport.INFLIGHT_ENGINE_SEARCHERS.isEmpty();
-                }
-            }, 5, TimeUnit.SECONDS)) {
-                return;
-            }
-        } catch (InterruptedException ex) {
-            if (MockEngineSupport.INFLIGHT_ENGINE_SEARCHERS.isEmpty()) {
-                return;
-            }
-        }
-        try {
-            RuntimeException ex = null;
-            StringBuilder builder = new StringBuilder("Unclosed Searchers instance for shards: [");
-            for (Map.Entry<AssertingSearcher, RuntimeException> entry : MockEngineSupport.INFLIGHT_ENGINE_SEARCHERS.entrySet()) {
-                ex = entry.getValue();
-                builder.append(entry.getKey().shardId()).append(",");
-            }
-            builder.append("]");
-            throw new RuntimeException(builder.toString(), ex);
-        } finally {
-            MockEngineSupport.INFLIGHT_ENGINE_SEARCHERS.clear();
-        }
-    }
-
-    public static void assertAllFilesClosed() {
-        try {
-            for (final MockDirectoryHelper.ElasticsearchMockDirectoryWrapper w : MockDirectoryHelper.wrappers) {
-                try {
-                    w.awaitClosed(5000);
-                } catch (InterruptedException e) {
-                    Thread.interrupted();
-                }
-                if (!w.successfullyClosed()) {
-                    if (w.closeException() == null) {
-                        try {
-                            w.close();
-                        } catch (IOException e) {
-                            throw new ElasticsearchIllegalStateException("directory close threw IOException", e);
-                        }
-                        if (w.closeException() != null) {
-                            throw w.closeException();
-                        }
-                    } else {
-                        throw w.closeException();
-                    }
-                }
-                assertThat(w.isOpen(), is(false));
-            }
-        } finally {
-            MockDirectoryHelper.wrappers.clear();
-        }
-    }
-
     public static void assertNodeContainsPlugins(NodesInfoResponse response, String nodeId,
                                                  List<String> expectedJvmPluginNames,
                                                  List<String> expectedJvmPluginDescriptions,
diff --git a/src/test/java/org/elasticsearch/test/index/merge/NoMergePolicyProvider.java b/src/test/java/org/elasticsearch/test/index/merge/NoMergePolicyProvider.java
index d73f501a3ab..4d74b049612 100644
--- a/src/test/java/org/elasticsearch/test/index/merge/NoMergePolicyProvider.java
+++ b/src/test/java/org/elasticsearch/test/index/merge/NoMergePolicyProvider.java
@@ -41,6 +41,6 @@ public class NoMergePolicyProvider extends AbstractMergePolicyProvider<NoMergePolicy> {
diff --git a/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java
--- a/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java
+++ b/src/test/java/org/elasticsearch/test/rest/ElasticsearchRestTestCase.java
+    @Override
+    protected void afterIfFailed(List<Throwable> errors) {
+        logger.info("Stash dump on failure [{}]", XContentHelper.toString(restTestExecutionContext.stash()));
+        super.afterIfFailed(errors);
+    }
+
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         return ImmutableSettings.builder()
@@ -227,9 +219,6 @@ public abstract class ElasticsearchRestTestCase extends ElasticsearchIntegration
                 if (!restApi.getMethods().contains("POST")) {
                     errorMessage.append("\n- ").append(restApi.getName()).append(" supports GET with a body but doesn't support POST");
                 }
-                if (!restApi.getParams().contains("source")) {
-                    errorMessage.append("\n- ").append(restApi.getName()).append(" supports GET with a body but doesn't support the source query string parameter");
-                }
             }
         }
         if (errorMessage.length() > 0) {
@@ -253,7 +242,10 @@ public abstract class ElasticsearchRestTestCase extends ElasticsearchIntegration
     @Override
     protected int maximumNumberOfReplicas() {
-        return 1; // never go crazy in the REST tests
+        // hardcoded 1 since this is what clients also do and our tests must expect that we have only one node
+        // with replicas set to 1, i.e. the cluster won't be green
+        return 1;
+
     }

     /**
@@ -315,86 +307,4 @@ public abstract class ElasticsearchRestTestCase extends ElasticsearchIntegration
             executableSection.execute(restTestExecutionContext);
         }
     }
-
-    // don't look any further: NO TOUCHY!
-
-    public static class Rest0Tests extends ElasticsearchRestTestCase {
-        public Rest0Tests(@Name("yaml") RestTestCandidate testCandidate) {
-            super(testCandidate);
-        }
-        @ParametersFactory
-        public static Iterable parameters() throws IOException, RestTestParseException {
-            return createParameters(0, 8);
-        }
-    }
-
-    public static class Rest1Tests extends ElasticsearchRestTestCase {
-        public Rest1Tests(@Name("yaml") RestTestCandidate testCandidate) {
-            super(testCandidate);
-        }
-        @ParametersFactory
-        public static Iterable parameters() throws IOException, RestTestParseException {
-            return createParameters(1, 8);
-        }
-    }
-
-    public static class Rest2Tests extends ElasticsearchRestTestCase {
-        public Rest2Tests(@Name("yaml") RestTestCandidate testCandidate) {
-            super(testCandidate);
-        }
-        @ParametersFactory
-        public static Iterable parameters() throws IOException, RestTestParseException {
-            return createParameters(2, 8);
-        }
-    }
-
-    public static class Rest3Tests extends ElasticsearchRestTestCase {
-        public Rest3Tests(@Name("yaml") RestTestCandidate testCandidate) {
-            super(testCandidate);
-        }
-        @ParametersFactory
-        public static Iterable parameters() throws IOException, RestTestParseException {
-            return createParameters(3, 8);
-        }
-    }
-
-    public static class Rest4Tests extends ElasticsearchRestTestCase {
-        public Rest4Tests(@Name("yaml") RestTestCandidate testCandidate) {
-            super(testCandidate);
-        }
-        @ParametersFactory
-        public static Iterable parameters() throws IOException, RestTestParseException {
-            return createParameters(4, 8);
-        }
-    }
-
-    public static class Rest5Tests extends ElasticsearchRestTestCase {
-        public Rest5Tests(@Name("yaml") RestTestCandidate testCandidate) {
-            super(testCandidate);
-        }
-        @ParametersFactory
-        public static Iterable parameters() throws IOException, RestTestParseException {
-            return
createParameters(5, 8); - } - } - - public static class Rest6Tests extends ElasticsearchRestTestCase { - public Rest6Tests(@Name("yaml") RestTestCandidate testCandidate) { - super(testCandidate); - } - @ParametersFactory - public static Iterable parameters() throws IOException, RestTestParseException { - return createParameters(6, 8); - } - } - - public static class Rest7Tests extends ElasticsearchRestTestCase { - public Rest7Tests(@Name("yaml") RestTestCandidate testCandidate) { - super(testCandidate); - } - @ParametersFactory - public static Iterable parameters() throws IOException, RestTestParseException { - return createParameters(7, 8); - } - } } diff --git a/src/test/java/org/elasticsearch/test/rest/Rest0Tests.java b/src/test/java/org/elasticsearch/test/rest/Rest0Tests.java new file mode 100644 index 00000000000..f86836876c5 --- /dev/null +++ b/src/test/java/org/elasticsearch/test/rest/Rest0Tests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +/** Rest API tests subset 0 */ +public class Rest0Tests extends ElasticsearchRestTestCase { + public Rest0Tests(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return createParameters(0, 8); + } +} diff --git a/src/test/java/org/elasticsearch/test/rest/Rest1Tests.java b/src/test/java/org/elasticsearch/test/rest/Rest1Tests.java new file mode 100644 index 00000000000..d75444fe005 --- /dev/null +++ b/src/test/java/org/elasticsearch/test/rest/Rest1Tests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.test.rest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +/** Rest API tests subset 1 */ +public class Rest1Tests extends ElasticsearchRestTestCase { + public Rest1Tests(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return createParameters(1, 8); + } +} diff --git a/src/test/java/org/elasticsearch/test/rest/Rest2Tests.java b/src/test/java/org/elasticsearch/test/rest/Rest2Tests.java new file mode 100644 index 00000000000..1d01ecc58ec --- /dev/null +++ b/src/test/java/org/elasticsearch/test/rest/Rest2Tests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +/** Rest API tests subset 2 */ +public class Rest2Tests extends ElasticsearchRestTestCase { + public Rest2Tests(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return createParameters(2, 8); + } +} diff --git a/src/test/java/org/elasticsearch/test/rest/Rest3Tests.java b/src/test/java/org/elasticsearch/test/rest/Rest3Tests.java new file mode 100644 index 00000000000..044e182e7a6 --- /dev/null +++ b/src/test/java/org/elasticsearch/test/rest/Rest3Tests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.test.rest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +/** Rest API tests subset 3 */ +public class Rest3Tests extends ElasticsearchRestTestCase { + public Rest3Tests(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return createParameters(3, 8); + } +} diff --git a/src/test/java/org/elasticsearch/test/rest/Rest4Tests.java b/src/test/java/org/elasticsearch/test/rest/Rest4Tests.java new file mode 100644 index 00000000000..75213143b9c --- /dev/null +++ b/src/test/java/org/elasticsearch/test/rest/Rest4Tests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +/** Rest API tests subset 4 */ +public class Rest4Tests extends ElasticsearchRestTestCase { + public Rest4Tests(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return createParameters(4, 8); + } +} diff --git a/src/test/java/org/elasticsearch/test/rest/Rest5Tests.java b/src/test/java/org/elasticsearch/test/rest/Rest5Tests.java new file mode 100644 index 00000000000..a2c1af46dd0 --- /dev/null +++ b/src/test/java/org/elasticsearch/test/rest/Rest5Tests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.test.rest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +/** Rest API tests subset 5 */ +public class Rest5Tests extends ElasticsearchRestTestCase { + public Rest5Tests(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return createParameters(5, 8); + } +} diff --git a/src/test/java/org/elasticsearch/test/rest/Rest6Tests.java b/src/test/java/org/elasticsearch/test/rest/Rest6Tests.java new file mode 100644 index 00000000000..bb7ccd10035 --- /dev/null +++ b/src/test/java/org/elasticsearch/test/rest/Rest6Tests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +/** Rest API tests subset 6 */ +public class Rest6Tests extends ElasticsearchRestTestCase { + public Rest6Tests(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return createParameters(6, 8); + } +} diff --git a/src/test/java/org/elasticsearch/test/rest/Rest7Tests.java b/src/test/java/org/elasticsearch/test/rest/Rest7Tests.java new file mode 100644 index 00000000000..aba7c03136b --- /dev/null +++ b/src/test/java/org/elasticsearch/test/rest/Rest7Tests.java @@ -0,0 +1,38 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.test.rest; + +import com.carrotsearch.randomizedtesting.annotations.Name; +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.elasticsearch.test.rest.parser.RestTestParseException; + +import java.io.IOException; + +/** Rest API tests subset 7 */ +public class Rest7Tests extends ElasticsearchRestTestCase { + public Rest7Tests(@Name("yaml") RestTestCandidate testCandidate) { + super(testCandidate); + } + @ParametersFactory + public static Iterable parameters() throws IOException, RestTestParseException { + return createParameters(7, 8); + } +} diff --git a/src/test/java/org/elasticsearch/test/rest/Stash.java b/src/test/java/org/elasticsearch/test/rest/Stash.java index 398b663cddb..4d0a1fb7fea 100644 --- a/src/test/java/org/elasticsearch/test/rest/Stash.java +++ b/src/test/java/org/elasticsearch/test/rest/Stash.java @@ -23,7 +23,10 @@ import com.google.common.collect.Maps; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import java.io.IOException; import java.util.List; import java.util.Map; @@ -31,7 +34,7 @@ import java.util.Map; * Allows to cache the last obtained test response and or part of it within variables * that can be used as input values in following requests and assertions. */ -public class Stash { +public class Stash implements ToXContent { private static final ESLogger logger = Loggers.getLogger(Stash.class); @@ -43,7 +46,7 @@ public class Stash { * Allows to saved a specific field in the stash as key-value pair */ public void stashValue(String key, Object value) { - logger.debug("stashing [{}]=[{}]", key, value); + logger.trace("stashing [{}]=[{}]", key, value); Object old = stash.put(key, value); if (old != null && old != value) { logger.trace("replaced stashed value [{}] with same key [{}]", old, key); @@ -116,4 +119,10 @@ public class Stash { } } } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("stash", stash); + return builder; + } } diff --git a/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java b/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java index bf8fe82a2c1..e7ab4555776 100644 --- a/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java +++ b/src/test/java/org/elasticsearch/test/rest/section/SkipSection.java @@ -97,6 +97,9 @@ public class SkipSection { if (versionRange == null) { return new Version[] { null, null }; } + if (versionRange.trim().equals("all")) { + return new Version[]{VersionUtils.getFirstVersion(), Version.CURRENT}; + } String[] skipVersions = versionRange.split("-"); if (skipVersions.length > 2) { throw new IllegalArgumentException("version range malformed: " + versionRange); diff --git a/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java b/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java index 5d940a10b56..1e71ae9181c 100644 --- a/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java +++ b/src/test/java/org/elasticsearch/test/rest/test/SkipSectionParserTests.java @@ -49,6 +49,23 @@ public class SkipSectionParserTests extends AbstractParserTests { assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param")); } + public void testParseSkipSectionAllVersions() throws 
Exception { + parser = YamlXContent.yamlXContent.createParser( + "version: \" all \"\n" + + "reason: Delete ignores the parent param" + ); + + SkipSectionParser skipSectionParser = new SkipSectionParser(); + + SkipSection skipSection = skipSectionParser.parse(new RestTestSuiteParseContext("api", "suite", parser)); + + assertThat(skipSection, notNullValue()); + assertThat(skipSection.getLowerVersion(), equalTo(VersionUtils.getFirstVersion())); + assertThat(skipSection.getUpperVersion(), equalTo(Version.CURRENT)); + assertThat(skipSection.getFeatures().size(), equalTo(0)); + assertThat(skipSection.getReason(), equalTo("Delete ignores the parent param")); + } + @Test public void testParseSkipSectionFeatureNoVersion() throws Exception { parser = YamlXContent.yamlXContent.createParser( diff --git a/src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java b/src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java deleted file mode 100644 index 06b844b950a..00000000000 --- a/src/test/java/org/elasticsearch/test/store/MockDirectoryHelper.java +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.test.store; - -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.FilterDirectory; -import org.apache.lucene.store.MMapDirectory; -import org.apache.lucene.store.MockDirectoryWrapper.Throttling; -import org.apache.lucene.store.MockDirectoryWrapper; -import org.apache.lucene.store.NRTCachingDirectory; -import org.apache.lucene.util.Constants; -import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ConcurrentCollections; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.shard.ShardPath; -import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.IndexStore; -import org.elasticsearch.index.store.fs.*; -import com.carrotsearch.randomizedtesting.SeedUtils; - -import java.io.IOException; -import java.lang.reflect.Field; -import java.util.Collection; -import java.util.Random; -import java.util.Set; - -public class MockDirectoryHelper { - public static final String RANDOM_IO_EXCEPTION_RATE = "index.store.mock.random.io_exception_rate"; - public static final String RANDOM_IO_EXCEPTION_RATE_ON_OPEN = "index.store.mock.random.io_exception_rate_on_open"; - public static final String RANDOM_PREVENT_DOUBLE_WRITE = "index.store.mock.random.prevent_double_write"; - public static final String RANDOM_NO_DELETE_OPEN_FILE = "index.store.mock.random.no_delete_open_file"; - public static final String CRASH_INDEX = "index.store.mock.random.crash_index"; - - public static final Set wrappers = ConcurrentCollections.newConcurrentSet(); - - private final Random random; - private final double randomIOExceptionRate; - private final double randomIOExceptionRateOnOpen; - private final Throttling throttle; - private final Settings indexSettings; - private final ShardId shardId; - private final boolean preventDoubleWrite; - private final boolean noDeleteOpenFile; - private final ESLogger logger; - private final boolean crashIndex; - - public MockDirectoryHelper(ShardId shardId, Settings indexSettings, ESLogger logger, Random random, long seed) { - this.random = random; - randomIOExceptionRate = indexSettings.getAsDouble(RANDOM_IO_EXCEPTION_RATE, 0.0d); - randomIOExceptionRateOnOpen = indexSettings.getAsDouble(RANDOM_IO_EXCEPTION_RATE_ON_OPEN, 0.0d); - preventDoubleWrite = indexSettings.getAsBoolean(RANDOM_PREVENT_DOUBLE_WRITE, true); // true is default in MDW - noDeleteOpenFile = indexSettings.getAsBoolean(RANDOM_NO_DELETE_OPEN_FILE, random.nextBoolean()); // true is default in MDW - random.nextInt(shardId.getId() + 1); // some randomness per shard - throttle = Throttling.NEVER; - crashIndex = indexSettings.getAsBoolean(CRASH_INDEX, true); - - if (logger.isDebugEnabled()) { - logger.debug("Using MockDirWrapper with seed [{}] throttle: [{}] crashIndex: [{}]", SeedUtils.formatSeed(seed), - throttle, crashIndex); - } - this.indexSettings = indexSettings; - this.shardId = shardId; - this.logger = logger; - } - - public Directory wrap(Directory dir) { - final ElasticsearchMockDirectoryWrapper w = new ElasticsearchMockDirectoryWrapper(random, dir, logger, this.crashIndex); - w.setRandomIOExceptionRate(randomIOExceptionRate); - w.setRandomIOExceptionRateOnOpen(randomIOExceptionRateOnOpen); - w.setThrottling(throttle); - w.setCheckIndexOnClose(false); // we do this on the index level - w.setPreventDoubleWrite(preventDoubleWrite); - // TODO: make this test robust to virus 
scanner - w.setEnableVirusScanner(false); - w.setNoDeleteOpenFile(noDeleteOpenFile); - w.setUseSlowOpenClosers(false); - wrappers.add(w); - return w; - } - - public FsDirectoryService randomDirectorService(IndexStore indexStore, ShardPath path) { - if ((Constants.WINDOWS || Constants.SUN_OS) && Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) { - return new MmapFsDirectoryService(shardId, indexSettings, indexStore, path); - } else if (Constants.WINDOWS) { - return new SimpleFsDirectoryService(shardId, indexSettings, indexStore, path); - } - switch (random.nextInt(4)) { - case 2: - return new DefaultFsDirectoryService(shardId, indexSettings, indexStore, path); - case 1: - return new MmapFsDirectoryService(shardId, indexSettings, indexStore, path); - case 0: - if (random.nextInt(10) == 0) { - // use simplefs less, it synchronizes all threads reads - return new SimpleFsDirectoryService(shardId, indexSettings, indexStore, path); - } - default: - return new NioFsDirectoryService(shardId, indexSettings, indexStore, path); - } - } - - public static final class ElasticsearchMockDirectoryWrapper extends MockDirectoryWrapper { - - private final ESLogger logger; - private final boolean crash; - private volatile RuntimeException closeException; - private final Object lock = new Object(); - private final Set superUnSyncedFiles; - private final Random superRandomState; - - public ElasticsearchMockDirectoryWrapper(Random random, Directory delegate, ESLogger logger, boolean crash) { - super(random, delegate); - this.crash = crash; - this.logger = logger; - - // TODO: remove all this and cutover to MockFS (DisableFsyncFS) instead - try { - Field field = MockDirectoryWrapper.class.getDeclaredField("unSyncedFiles"); - field.setAccessible(true); - superUnSyncedFiles = (Set) field.get(this); - - field = MockDirectoryWrapper.class.getDeclaredField("randomState"); - field.setAccessible(true); - superRandomState = (Random) field.get(this); - } catch (ReflectiveOperationException roe) { - throw new RuntimeException(roe); - } - } - - @Override - public synchronized void close() throws IOException { - try { - super.close(); - } catch (RuntimeException ex) { - logger.info("MockDirectoryWrapper#close() threw exception", ex); - closeException = ex; - throw ex; - } finally { - synchronized (lock) { - lock.notifyAll(); - } - } - } - - /** - * Returns true if {@link #in} must sync its files. - * Currently, only {@link NRTCachingDirectory} requires sync'ing its files - * because otherwise they are cached in an internal {@link org.apache.lucene.store.RAMDirectory}. If - * other directories require that too, they should be added to this method. - */ - private boolean mustSync() { - Directory delegate = in; - while (delegate instanceof FilterDirectory) { - if (delegate instanceof NRTCachingDirectory) { - return true; - } - delegate = ((FilterDirectory) delegate).getDelegate(); - } - return delegate instanceof NRTCachingDirectory; - } - - @Override - public synchronized void sync(Collection names) throws IOException { - // don't wear out our hardware so much in tests. 
- if (superRandomState.nextInt(100) == 0 || mustSync()) { - super.sync(names); - } else { - superUnSyncedFiles.removeAll(names); - } - } - - public void awaitClosed(long timeout) throws InterruptedException { - synchronized (lock) { - if(isOpen()) { - lock.wait(timeout); - } - } - } - - public synchronized boolean successfullyClosed() { - return closeException == null && !isOpen(); - } - - public synchronized RuntimeException closeException() { - return closeException; - } - - @Override - public synchronized void crash() throws IOException { - if (crash) { - super.crash(); - } - } - } -} diff --git a/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java b/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java index 23825b3b3ae..d07b3b7c4d5 100644 --- a/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java +++ b/src/test/java/org/elasticsearch/test/store/MockFSDirectoryService.java @@ -19,57 +19,86 @@ package org.elasticsearch.test.store; +import com.carrotsearch.randomizedtesting.SeedUtils; +import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.google.common.base.Charsets; import org.apache.lucene.index.CheckIndex; import org.apache.lucene.index.IndexWriter; -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.LockFactory; -import org.apache.lucene.store.StoreRateLimiting; +import org.apache.lucene.store.*; +import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.TestRuleMarkFailure; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.lease.Releasables; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.ImmutableSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.settings.IndexSettings; import org.elasticsearch.index.shard.*; import org.elasticsearch.index.store.IndexStore; +import org.elasticsearch.index.store.IndexStoreModule; import org.elasticsearch.index.store.Store; -import org.elasticsearch.index.store.fs.FsDirectoryService; +import org.elasticsearch.index.store.FsDirectoryService; import org.elasticsearch.indices.IndicesLifecycle; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ElasticsearchTestCase; import org.elasticsearch.test.ElasticsearchIntegrationTest; +import org.junit.Assert; +import java.io.Closeable; import java.io.IOException; import java.io.PrintStream; +import java.lang.reflect.Field; import java.nio.file.Path; -import java.util.Arrays; -import java.util.EnumSet; -import java.util.Random; +import java.util.*; public class MockFSDirectoryService extends FsDirectoryService { + public static final String CHECK_INDEX_ON_CLOSE = "index.store.mock.check_index_on_close"; + public static final String RANDOM_IO_EXCEPTION_RATE_ON_OPEN = "index.store.mock.random.io_exception_rate_on_open"; + public static final String RANDOM_PREVENT_DOUBLE_WRITE = "index.store.mock.random.prevent_double_write"; + public static final String RANDOM_NO_DELETE_OPEN_FILE = "index.store.mock.random.no_delete_open_file"; + public static final String CRASH_INDEX = "index.store.mock.random.crash_index"; + private static final EnumSet validCheckIndexStates = EnumSet.of( IndexShardState.STARTED, IndexShardState.RELOCATED , IndexShardState.POST_RECOVERY 
); - private final MockDirectoryHelper helper; - private FsDirectoryService delegateService; - public static final String CHECK_INDEX_ON_CLOSE = "index.store.mock.check_index_on_close"; + private final FsDirectoryService delegateService; private final boolean checkIndexOnClose; + private final Random random; + private final double randomIOExceptionRate; + private final double randomIOExceptionRateOnOpen; + private final MockDirectoryWrapper.Throttling throttle; + private final Settings indexSettings; + private final boolean preventDoubleWrite; + private final boolean noDeleteOpenFile; + private final boolean crashIndex; @Inject - public MockFSDirectoryService(final ShardId shardId, @IndexSettings Settings indexSettings, IndexStore indexStore, final IndicesService service, final ShardPath path) { - super(shardId, indexSettings, indexStore, path); + public MockFSDirectoryService(@IndexSettings Settings indexSettings, IndexStore indexStore, final IndicesService service, final ShardPath path) { + super(indexSettings, indexStore, path); final long seed = indexSettings.getAsLong(ElasticsearchIntegrationTest.SETTING_INDEX_SEED, 0l); - Random random = new Random(seed); - helper = new MockDirectoryHelper(shardId, indexSettings, logger, random, seed); + this.random = new Random(seed); checkIndexOnClose = indexSettings.getAsBoolean(CHECK_INDEX_ON_CLOSE, true); + randomIOExceptionRate = indexSettings.getAsDouble(RANDOM_IO_EXCEPTION_RATE, 0.0d); + randomIOExceptionRateOnOpen = indexSettings.getAsDouble(RANDOM_IO_EXCEPTION_RATE_ON_OPEN, 0.0d); + preventDoubleWrite = indexSettings.getAsBoolean(RANDOM_PREVENT_DOUBLE_WRITE, true); // true is default in MDW + noDeleteOpenFile = indexSettings.getAsBoolean(RANDOM_NO_DELETE_OPEN_FILE, random.nextBoolean()); // true is default in MDW + random.nextInt(shardId.getId() + 1); // some randomness per shard + throttle = MockDirectoryWrapper.Throttling.NEVER; + crashIndex = indexSettings.getAsBoolean(CRASH_INDEX, true); - delegateService = helper.randomDirectorService(indexStore, path); + if (logger.isDebugEnabled()) { + logger.debug("Using MockDirWrapper with seed [{}] throttle: [{}] crashIndex: [{}]", SeedUtils.formatSeed(seed), + throttle, crashIndex); + } + this.indexSettings = indexSettings; + delegateService = randomDirectorService(indexStore, path); if (checkIndexOnClose) { final IndicesLifecycle.Listener listener = new IndicesLifecycle.Listener() { @@ -112,7 +141,7 @@ public class MockFSDirectoryService extends FsDirectoryService { @Override public Directory newDirectory() throws IOException { - return helper.wrap(delegateService.newDirectory()); + return wrap(delegateService.newDirectory()); } @Override @@ -173,4 +202,117 @@ public class MockFSDirectoryService extends FsDirectoryService { public long throttleTimeInNanos() { return delegateService.throttleTimeInNanos(); } + + public static final String RANDOM_IO_EXCEPTION_RATE = "index.store.mock.random.io_exception_rate"; + + private Directory wrap(Directory dir) { + final ElasticsearchMockDirectoryWrapper w = new ElasticsearchMockDirectoryWrapper(random, dir, this.crashIndex); + w.setRandomIOExceptionRate(randomIOExceptionRate); + w.setRandomIOExceptionRateOnOpen(randomIOExceptionRateOnOpen); + w.setThrottling(throttle); + w.setCheckIndexOnClose(false); // we do this on the index level + w.setPreventDoubleWrite(preventDoubleWrite); + // TODO: make this test robust to virus scanner + w.setEnableVirusScanner(false); + w.setNoDeleteOpenFile(noDeleteOpenFile); + w.setUseSlowOpenClosers(false); + 
LuceneTestCase.closeAfterSuite(new CloseableDirectory(w));
+        return w;
+    }
+
+    private FsDirectoryService randomDirectorService(IndexStore indexStore, ShardPath path) {
+        ImmutableSettings.Builder builder = ImmutableSettings.settingsBuilder();
+        builder.put(indexSettings);
+        builder.put(IndexStoreModule.STORE_TYPE, RandomPicks.randomFrom(random, IndexStoreModule.Type.values()));
+        return new FsDirectoryService(builder.build(), indexStore, path);
+    }
+
+    public static final class ElasticsearchMockDirectoryWrapper extends MockDirectoryWrapper {
+
+        private final boolean crash;
+        private final Set<String> superUnSyncedFiles;
+        private final Random superRandomState;
+
+        public ElasticsearchMockDirectoryWrapper(Random random, Directory delegate, boolean crash) {
+            super(random, delegate);
+            this.crash = crash;
+
+            // TODO: remove all this and cutover to MockFS (DisableFsyncFS) instead
+            try {
+                Field field = MockDirectoryWrapper.class.getDeclaredField("unSyncedFiles");
+                field.setAccessible(true);
+                superUnSyncedFiles = (Set<String>) field.get(this);
+
+                field = MockDirectoryWrapper.class.getDeclaredField("randomState");
+                field.setAccessible(true);
+                superRandomState = (Random) field.get(this);
+            } catch (ReflectiveOperationException roe) {
+                throw new RuntimeException(roe);
+            }
+        }
+
+        /**
+         * Returns true if {@link #in} must sync its files.
+         * Currently, only {@link org.apache.lucene.store.NRTCachingDirectory} requires sync'ing its files
+         * because otherwise they are cached in an internal {@link org.apache.lucene.store.RAMDirectory}. If
+         * other directories require that too, they should be added to this method.
+         */
+        private boolean mustSync() {
+            Directory delegate = in;
+            while (delegate instanceof FilterDirectory) {
+                if (delegate instanceof NRTCachingDirectory) {
+                    return true;
+                }
+                delegate = ((FilterDirectory) delegate).getDelegate();
+            }
+            return delegate instanceof NRTCachingDirectory;
+        }
+
+        @Override
+        public synchronized void sync(Collection<String> names) throws IOException {
+            // don't wear out our hardware so much in tests.
+            if (superRandomState.nextInt(100) == 0 || mustSync()) {
+                super.sync(names);
+            } else {
+                superUnSyncedFiles.removeAll(names);
+            }
+        }
+
+        @Override
+        public synchronized void crash() throws IOException {
+            if (crash) {
+                super.crash();
+            }
+        }
+    }
+
+    final class CloseableDirectory implements Closeable {
+        private final BaseDirectoryWrapper dir;
+        private final TestRuleMarkFailure failureMarker;
+
+        public CloseableDirectory(BaseDirectoryWrapper dir) {
+            this.dir = dir;
+            try {
+                final Field suiteFailureMarker = LuceneTestCase.class.getDeclaredField("suiteFailureMarker");
+                suiteFailureMarker.setAccessible(true);
+                this.failureMarker = (TestRuleMarkFailure) suiteFailureMarker.get(LuceneTestCase.class);
+            } catch (Throwable e) {
+                throw new ElasticsearchException("failed to get LuceneTestCase#suiteFailureMarker via reflection", e);
+            }
+        }
+
+        @Override
+        public void close() {
+            // We only attempt to check open/closed state if there were no other test
+            // failures.
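+            // (An earlier failure typically leaves searchers and files open as collateral
+            // damage, and a secondary "directory not closed" failure here would only hide
+            // the root cause.)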
+ try { + if (failureMarker.wasSuccessful() && dir.isOpen()) { + Assert.fail("Directory not closed: " + dir); + } + } finally { + // TODO: perform real close of the delegate: LUCENE-4058 + // dir.close(); + } + } + } } diff --git a/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java b/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java index 27adcc09f46..ade21f3182b 100644 --- a/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java +++ b/src/test/java/org/elasticsearch/test/store/MockFSIndexStore.java @@ -25,16 +25,17 @@ import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.settings.IndexSettings; +import org.elasticsearch.index.settings.IndexSettingsService; import org.elasticsearch.index.store.DirectoryService; -import org.elasticsearch.index.store.support.AbstractIndexStore; +import org.elasticsearch.index.store.IndexStore; import org.elasticsearch.indices.store.IndicesStore; -public class MockFSIndexStore extends AbstractIndexStore { +public class MockFSIndexStore extends IndexStore { @Inject - public MockFSIndexStore(Index index, @IndexSettings Settings indexSettings, IndexService indexService, - IndicesStore indicesStore, NodeEnvironment nodeEnv) { - super(index, indexSettings, indexService, indicesStore, nodeEnv); + public MockFSIndexStore(Index index, @IndexSettings Settings indexSettings, IndexSettingsService indexSettingsService, + IndicesStore indicesStore) { + super(index, indexSettings, indexSettingsService, indicesStore); } @Override diff --git a/src/test/java/org/elasticsearch/test/transport/CapturingTransport.java b/src/test/java/org/elasticsearch/test/transport/CapturingTransport.java new file mode 100644 index 00000000000..8cb1f620c3a --- /dev/null +++ b/src/test/java/org/elasticsearch/test/transport/CapturingTransport.java @@ -0,0 +1,180 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.elasticsearch.test.transport;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.component.Lifecycle;
+import org.elasticsearch.common.component.LifecycleListener;
+import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.transport.TransportAddress;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.transport.*;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.BlockingQueue;
+
+/** A transport class that doesn't send anything but rather captures all requests for inspection from tests */
+public class CapturingTransport implements Transport {
+    private TransportServiceAdapter adapter;
+
+    public static class CapturedRequest {
+        public final DiscoveryNode node;
+        public final long requestId;
+        public final String action;
+        public final TransportRequest request;
+
+        public CapturedRequest(DiscoveryNode node, long requestId, String action, TransportRequest request) {
+            this.node = node;
+            this.requestId = requestId;
+            this.action = action;
+            this.request = request;
+        }
+    }
+
+    private BlockingQueue<CapturedRequest> capturedRequests = ConcurrentCollections.newBlockingQueue();
+
+    /** Returns all requests captured so far. Doesn't clear the captured request list. See {@link #clear()} */
+    public CapturedRequest[] capturedRequests() {
+        return capturedRequests.toArray(new CapturedRequest[0]);
+    }
+
+    /**
+     * Returns all requests captured so far, grouped by target node.
+     * Doesn't clear the captured request list. See {@link #clear()}
+     */
+    public Map<String, List<CapturedRequest>> capturedRequestsByTargetNode() {
+        Map<String, List<CapturedRequest>> map = new HashMap<>();
+        for (CapturedRequest request : capturedRequests) {
+            List<CapturedRequest> nodeList = map.get(request.node.id());
+            if (nodeList == null) {
+                nodeList = new ArrayList<>();
+                map.put(request.node.id(), nodeList);
+            }
+            nodeList.add(request);
+        }
+        return map;
+    }
+
+    /** Clears captured requests */
+    public void clear() {
+        capturedRequests.clear();
+    }
+
+    /** Simulates a response for the given requestId */
+    public void handleResponse(final long requestId, final TransportResponse response) {
+        adapter.onResponseReceived(requestId).handleResponse(response);
+    }
+
+    /** Simulates a remote error for the given requestId */
+    public void handleResponse(final long requestId, final Throwable t) {
+        adapter.onResponseReceived(requestId).handleException(new RemoteTransportException("remote failure", t));
+    }
+
+    @Override
+    public void sendRequest(DiscoveryNode node, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, TransportException {
+        capturedRequests.add(new CapturedRequest(node, requestId, action, request));
+    }
+
+    @Override
+    public void transportServiceAdapter(TransportServiceAdapter adapter) {
+        this.adapter = adapter;
+    }
+
+    @Override
+    public BoundTransportAddress boundAddress() {
+        return null;
+    }
+
+    @Override
+    public Map<String, BoundTransportAddress> profileBoundAddresses() {
+        return null;
+    }
+
+    @Override
+    public TransportAddress[] addressesFromString(String address) throws Exception {
+        return new TransportAddress[0];
+    }
+
+    @Override
+    public boolean addressSupported(Class<? extends TransportAddress> address) {
+        return false;
+    }
+
+    @Override
+    public boolean nodeConnected(DiscoveryNode node) {
+        return true;
+    }
+
+    @Override
+    public void connectToNode(DiscoveryNode node) throws
ConnectTransportException { + + } + + @Override + public void connectToNodeLight(DiscoveryNode node) throws ConnectTransportException { + + } + + @Override + public void disconnectFromNode(DiscoveryNode node) { + + } + + @Override + public long serverOpen() { + return 0; + } + + @Override + public Lifecycle.State lifecycleState() { + return null; + } + + @Override + public void addLifecycleListener(LifecycleListener listener) { + + } + + @Override + public void removeLifecycleListener(LifecycleListener listener) { + + } + + @Override + public Transport start() { + return null; + } + + @Override + public Transport stop() { + return null; + } + + @Override + public void close() { + + } +} diff --git a/src/test/java/org/elasticsearch/test/transport/MockTransportService.java b/src/test/java/org/elasticsearch/test/transport/MockTransportService.java index cfb7284f749..5e8b135a61c 100644 --- a/src/test/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/src/test/java/org/elasticsearch/test/transport/MockTransportService.java @@ -228,10 +228,10 @@ public class MockTransportService extends TransportService { } // poor mans request cloning... - TransportRequestHandler handler = MockTransportService.this.getHandler(action); + RequestHandlerRegistry reg = MockTransportService.this.getRequestHandler(action); BytesStreamOutput bStream = new BytesStreamOutput(); request.writeTo(bStream); - final TransportRequest clonedRequest = handler.newInstance(); + final TransportRequest clonedRequest = reg.newRequest(); clonedRequest.readFrom(new BytesStreamInput(bStream.bytes())); threadPool.schedule(delay, ThreadPool.Names.GENERIC, new AbstractRunnable() { @@ -387,19 +387,19 @@ public class MockTransportService extends TransportService { } @Override - public Transport start() throws ElasticsearchException { + public Transport start() { transport.start(); return this; } @Override - public Transport stop() throws ElasticsearchException { + public Transport stop() { transport.stop(); return this; } @Override - public void close() throws ElasticsearchException { + public void close() { transport.close(); } diff --git a/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java b/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java index 38ef0285179..09bdd79c70a 100644 --- a/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java +++ b/src/test/java/org/elasticsearch/transport/AbstractSimpleTransportTests.java @@ -125,17 +125,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testHelloWorld() { - serviceA.registerHandler("sayHello", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHello", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) { assertThat("moshe", equalTo(request.message)); @@ -221,17 +211,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase serviceA.disconnectFromNode(nodeA); } final AtomicReference exception = new AtomicReference<>(); - serviceA.registerHandler("localNode", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - 
@Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("localNode", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) { try { @@ -273,17 +253,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testVoidMessageCompressed() { - serviceA.registerHandler("sayHello", new BaseTransportRequestHandler() { - @Override - public TransportRequest.Empty newInstance() { - return TransportRequest.Empty.INSTANCE; - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHello", TransportRequest.Empty.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(TransportRequest.Empty request, TransportChannel channel) { try { @@ -330,17 +300,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testHelloWorldCompressed() { - serviceA.registerHandler("sayHello", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHello", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) { assertThat("moshe", equalTo(request.message)); @@ -389,17 +349,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testErrorMessage() { - serviceA.registerHandler("sayHelloException", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHelloException", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception { assertThat("moshe", equalTo(request.message)); @@ -463,20 +413,9 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase public void testNotifyOnShutdown() throws Exception { final CountDownLatch latch2 = new CountDownLatch(1); - serviceA.registerHandler("foobar", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("foobar", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) { - try { latch2.await(); logger.info("Stop ServiceB now"); @@ -500,17 +439,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testTimeoutSendExceptionWithNeverSendingBackResponse() throws Exception { - serviceA.registerHandler("sayHelloTimeoutNoResponse", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - @Override - public String executor() { - return 
ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHelloTimeoutNoResponse", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) { assertThat("moshe", equalTo(request.message)); @@ -559,17 +488,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testTimeoutSendExceptionWithDelayedResponse() throws Exception { - serviceA.registerHandler("sayHelloTimeoutDelayedResponse", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHelloTimeoutDelayedResponse", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) { TimeValue sleep = TimeValue.parseTimeValue(request.message, null); @@ -658,29 +577,14 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test @TestLogging(value = "test. transport.tracer:TRACE") public void testTracerLog() throws InterruptedException { - TransportRequestHandler handler = new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(""); - } - + TransportRequestHandler handler = new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception { channel.sendResponse(new StringMessageResponse("")); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } }; - TransportRequestHandler handlerWithError = new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(""); - } - + TransportRequestHandler handlerWithError = new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception { if (request.timeout() > 0) { @@ -689,11 +593,6 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase channel.sendResponse(new RuntimeException("")); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } }; final Semaphore requestCompleted = new Semaphore(0); @@ -720,10 +619,10 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase } }; - serviceA.registerHandler("test", handler); - serviceA.registerHandler("testError", handlerWithError); - serviceB.registerHandler("test", handler); - serviceB.registerHandler("testError", handlerWithError); + serviceA.registerRequestHandler("test", StringMessageRequest.class, ThreadPool.Names.SAME, handler); + serviceA.registerRequestHandler("testError", StringMessageRequest.class, ThreadPool.Names.SAME, handlerWithError); + serviceB.registerRequestHandler("test", StringMessageRequest.class, ThreadPool.Names.SAME, handler); + serviceB.registerRequestHandler("testError", StringMessageRequest.class, ThreadPool.Names.SAME, handlerWithError); final Tracer tracer = new Tracer(); serviceA.addTracer(tracer); @@ -983,12 +882,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testVersion_from0to1() throws Exception { - serviceB.registerHandler("/version", new 
BaseTransportRequestHandler() { - @Override - public Version1Request newInstance() { - return new Version1Request(); - } - + serviceB.registerRequestHandler("/version", Version1Request.class, ThreadPool.Names.SAME, new TransportRequestHandler() { @Override public void messageReceived(Version1Request request, TransportChannel channel) throws Exception { assertThat(request.value1, equalTo(1)); @@ -998,11 +892,6 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase response.value2 = 2; channel.sendResponse(response); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } }); Version0Request version0Request = new Version0Request(); @@ -1035,12 +924,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testVersion_from1to0() throws Exception { - serviceA.registerHandler("/version", new BaseTransportRequestHandler() { - @Override - public Version0Request newInstance() { - return new Version0Request(); - } - + serviceA.registerRequestHandler("/version", Version0Request.class, ThreadPool.Names.SAME, new TransportRequestHandler() { @Override public void messageReceived(Version0Request request, TransportChannel channel) throws Exception { assertThat(request.value1, equalTo(1)); @@ -1048,11 +932,6 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase response.value1 = 1; channel.sendResponse(response); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } }); Version1Request version1Request = new Version1Request(); @@ -1088,12 +967,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testVersion_from1to1() throws Exception { - serviceB.registerHandler("/version", new BaseTransportRequestHandler() { - @Override - public Version1Request newInstance() { - return new Version1Request(); - } - + serviceB.registerRequestHandler("/version", Version1Request.class, ThreadPool.Names.SAME, new TransportRequestHandler() { @Override public void messageReceived(Version1Request request, TransportChannel channel) throws Exception { assertThat(request.value1, equalTo(1)); @@ -1103,11 +977,6 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase response.value2 = 2; channel.sendResponse(response); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } }); Version1Request version1Request = new Version1Request(); @@ -1143,12 +1012,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testVersion_from0to0() throws Exception { - serviceA.registerHandler("/version", new BaseTransportRequestHandler() { - @Override - public Version0Request newInstance() { - return new Version0Request(); - } - + serviceA.registerRequestHandler("/version", Version0Request.class, ThreadPool.Names.SAME, new TransportRequestHandler() { @Override public void messageReceived(Version0Request request, TransportChannel channel) throws Exception { assertThat(request.value1, equalTo(1)); @@ -1156,11 +1020,6 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase response.value1 = 1; channel.sendResponse(response); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } }); Version0Request version0Request = new Version0Request(); @@ -1193,17 +1052,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testMockFailToSendNoConnectRule() { - 
serviceA.registerHandler("sayHello", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHello", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception { assertThat("moshe", equalTo(request.message)); @@ -1262,17 +1111,7 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase @Test public void testMockUnresponsiveRule() { - serviceA.registerHandler("sayHello", new BaseTransportRequestHandler() { - @Override - public StringMessageRequest newInstance() { - return new StringMessageRequest(); - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHello", StringMessageRequest.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(StringMessageRequest request, TransportChannel channel) throws Exception { assertThat("moshe", equalTo(request.message)); @@ -1335,28 +1174,13 @@ public abstract class AbstractSimpleTransportTests extends ElasticsearchTestCase final CountDownLatch latch = new CountDownLatch(2); final AtomicReference addressA = new AtomicReference<>(); final AtomicReference addressB = new AtomicReference<>(); - serviceB.registerHandler("action1", new TransportRequestHandler() { - @Override - public TestRequest newInstance() { - return new TestRequest(); - } - + serviceB.registerRequestHandler("action1", TestRequest.class, ThreadPool.Names.SAME, new TransportRequestHandler() { @Override public void messageReceived(TestRequest request, TransportChannel channel) throws Exception { addressA.set(request.remoteAddress()); channel.sendResponse(new TestResponse()); latch.countDown(); } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - - @Override - public boolean isForceExecution() { - return false; - } }); serviceA.sendRequest(nodeB, "action1", new TestRequest(), new TransportResponseHandler() { @Override diff --git a/src/test/java/org/elasticsearch/transport/ActionNamesTests.java b/src/test/java/org/elasticsearch/transport/ActionNamesTests.java index f1e98801d41..69be9f8fdf2 100644 --- a/src/test/java/org/elasticsearch/transport/ActionNamesTests.java +++ b/src/test/java/org/elasticsearch/transport/ActionNamesTests.java @@ -19,14 +19,6 @@ package org.elasticsearch.transport; -import com.google.common.collect.Lists; -import org.elasticsearch.Version; -import org.elasticsearch.action.admin.indices.get.GetIndexAction; -import org.elasticsearch.action.admin.cluster.repositories.verify.VerifyRepositoryAction; -import org.elasticsearch.action.exists.ExistsAction; -import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing; -import org.elasticsearch.search.action.SearchServiceTransportAction; -import org.elasticsearch.repositories.VerifyNodeRepositoryAction; import org.elasticsearch.test.ElasticsearchIntegrationTest; import org.junit.Test; @@ -55,7 +47,7 @@ public class ActionNamesTests extends ElasticsearchIntegrationTest { @SuppressWarnings("unchecked") public void testActionNamesCategories() throws NoSuchFieldException, IllegalAccessException { TransportService transportService = internalCluster().getInstance(TransportService.class); - for (String action : 
transportService.serverHandlers.keySet()) { + for (String action : transportService.requestHandlers.keySet()) { assertThat("action doesn't belong to known category", action, either(startsWith("indices:admin")).or(startsWith("indices:monitor")) .or(startsWith("indices:data/read")).or(startsWith("indices:data/write")) .or(startsWith("cluster:admin")).or(startsWith("cluster:monitor")) diff --git a/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java b/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java index 928cd2e7955..5a0e64d992c 100644 --- a/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java +++ b/src/test/java/org/elasticsearch/transport/NettySizeHeaderFrameDecoderTests.java @@ -61,7 +61,7 @@ public class NettySizeHeaderFrameDecoderTests extends ElasticsearchTestCase { threadPool = new ThreadPool(settings, new NodeSettingsService(settings)); NetworkService networkService = new NetworkService(settings); - BigArrays bigArrays = new MockBigArrays(settings, new MockPageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService()); + BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService()); nettyTransport = new NettyTransport(settings, threadPool, networkService, bigArrays, Version.CURRENT); nettyTransport.start(); TransportService transportService = new TransportService(nettyTransport, threadPool); diff --git a/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java b/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java index 07d5751ba96..1f5e5f9c9ee 100644 --- a/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java +++ b/src/test/java/org/elasticsearch/transport/netty/KeyedLockTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.transport.netty; -import org.elasticsearch.ElasticsearchIllegalStateException; import org.elasticsearch.common.util.concurrent.KeyedLock; import org.elasticsearch.test.ElasticsearchTestCase; import org.hamcrest.Matchers; @@ -40,7 +39,7 @@ public class KeyedLockTests extends ElasticsearchTestCase { public void checkIfMapEmptyAfterLotsOfAcquireAndReleases() throws InterruptedException { ConcurrentHashMap counter = new ConcurrentHashMap<>(); ConcurrentHashMap safeCounter = new ConcurrentHashMap<>(); - KeyedLock connectionLock = randomBoolean() ? new KeyedLock.GlobalLockable() : new KeyedLock(); + KeyedLock connectionLock = randomBoolean() ? new KeyedLock.GlobalLockable(randomBoolean()) : new KeyedLock(randomBoolean()); String[] names = new String[randomIntBetween(1, 40)]; for (int i = 0; i < names.length; i++) { names[i] = randomRealisticUnicodeOfLengthBetween(10, 20); @@ -75,7 +74,7 @@ public class KeyedLockTests extends ElasticsearchTestCase { } } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void checkCannotAcquireTwoLocksGlobal() throws InterruptedException { KeyedLock.GlobalLockable connectionLock = new KeyedLock.GlobalLockable<>(); String name = randomRealisticUnicodeOfLength(scaledRandomIntBetween(10, 50)); @@ -89,7 +88,7 @@ public class KeyedLockTests extends ElasticsearchTestCase { } } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void checkCannotAcquireTwoLocks() throws InterruptedException { KeyedLock connectionLock = randomBoolean() ? 
new KeyedLock.GlobalLockable() : new KeyedLock(); String name = randomRealisticUnicodeOfLength(scaledRandomIntBetween(10, 50)); @@ -97,7 +96,7 @@ public class KeyedLockTests extends ElasticsearchTestCase { connectionLock.acquire(name); } - @Test(expected = ElasticsearchIllegalStateException.class) + @Test(expected = IllegalStateException.class) public void checkCannotReleaseUnacquiredLock() throws InterruptedException { KeyedLock connectionLock = randomBoolean() ? new KeyedLock.GlobalLockable() : new KeyedLock(); String name = randomRealisticUnicodeOfLength(scaledRandomIntBetween(10, 50)); diff --git a/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java b/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java index 9e323f809ee..8fe32cfc27e 100644 --- a/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java +++ b/src/test/java/org/elasticsearch/transport/netty/NettyScheduledPingTests.java @@ -30,8 +30,6 @@ import org.elasticsearch.test.ElasticsearchTestCase; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.*; -import org.hamcrest.Matchers; -import org.junit.Before; import org.junit.Test; import java.io.IOException; @@ -75,17 +73,7 @@ public class NettyScheduledPingTests extends ElasticsearchTestCase { assertThat(nettyA.scheduledPing.failedPings.count(), equalTo(0l)); assertThat(nettyB.scheduledPing.failedPings.count(), equalTo(0l)); - serviceA.registerHandler("sayHello", new BaseTransportRequestHandler() { - @Override - public TransportRequest.Empty newInstance() { - return TransportRequest.Empty.INSTANCE; - } - - @Override - public String executor() { - return ThreadPool.Names.GENERIC; - } - + serviceA.registerRequestHandler("sayHello", TransportRequest.Empty.class, ThreadPool.Names.GENERIC, new TransportRequestHandler() { @Override public void messageReceived(TransportRequest.Empty request, TransportChannel channel) { try { diff --git a/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java b/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java index 20a1731e0c9..aeb085a07b3 100644 --- a/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java +++ b/src/test/java/org/elasticsearch/transport/netty/NettyTransportMultiPortTests.java @@ -170,7 +170,7 @@ public class NettyTransportMultiPortTests extends ElasticsearchTestCase { } private NettyTransport startNettyTransport(Settings settings, ThreadPool threadPool) { - BigArrays bigArrays = new MockBigArrays(settings, new PageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService()); + BigArrays bigArrays = new MockBigArrays(new PageCacheRecycler(settings, threadPool), new NoneCircuitBreakerService()); NettyTransport nettyTransport = new NettyTransport(settings, threadPool, new NetworkService(settings), bigArrays, Version.CURRENT); nettyTransport.start(); diff --git a/src/test/java/org/elasticsearch/transport/netty/NettyTransportTests.java b/src/test/java/org/elasticsearch/transport/netty/NettyTransportTests.java index 785aaf40abc..5edc6b0dfa7 100644 --- a/src/test/java/org/elasticsearch/transport/netty/NettyTransportTests.java +++ b/src/test/java/org/elasticsearch/transport/netty/NettyTransportTests.java @@ -34,10 +34,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import 
org.elasticsearch.test.ElasticsearchIntegrationTest; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.transport.ActionNotFoundTransportException; -import org.elasticsearch.transport.TransportModule; -import org.elasticsearch.transport.TransportRequest; -import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.*; import org.jboss.netty.channel.Channel; import org.jboss.netty.channel.ChannelPipeline; import org.jboss.netty.channel.ChannelPipelineFactory; @@ -115,21 +112,21 @@ public class NettyTransportTests extends ElasticsearchIntegrationTest { final NettyTransportChannel transportChannel = new NettyTransportChannel(transport, transportServiceAdapter, action, channel, requestId, version, name); try { - final TransportRequestHandler handler = transportServiceAdapter.handler(action); - if (handler == null) { + final RequestHandlerRegistry reg = transportServiceAdapter.getRequestHandler(action); + if (reg == null) { throw new ActionNotFoundTransportException(action); } - final TransportRequest request = handler.newInstance(); + final TransportRequest request = reg.newRequest(); request.remoteAddress(new InetSocketTransportAddress((InetSocketAddress) channel.getRemoteAddress())); request.readFrom(buffer); if (request.hasHeader("ERROR")) { throw new ElasticsearchException((String) request.getHeader("ERROR")); } - if (handler.executor() == ThreadPool.Names.SAME) { + if (reg.getExecutor() == ThreadPool.Names.SAME) { //noinspection unchecked - handler.messageReceived(request, transportChannel); + reg.getHandler().messageReceived(request, transportChannel); } else { - threadPool.executor(handler.executor()).execute(new RequestHandler(handler, request, transportChannel, action)); + threadPool.executor(reg.getExecutor()).execute(new RequestHandler(reg, request, transportChannel)); } } catch (Throwable e) { try { @@ -144,27 +141,25 @@ public class NettyTransportTests extends ElasticsearchIntegrationTest { } class RequestHandler extends AbstractRunnable { - private final TransportRequestHandler handler; + private final RequestHandlerRegistry reg; private final TransportRequest request; private final NettyTransportChannel transportChannel; - private final String action; - public RequestHandler(TransportRequestHandler handler, TransportRequest request, NettyTransportChannel transportChannel, String action) { - this.handler = handler; + public RequestHandler(RequestHandlerRegistry reg, TransportRequest request, NettyTransportChannel transportChannel) { + this.reg = reg; this.request = request; this.transportChannel = transportChannel; - this.action = action; } @SuppressWarnings({"unchecked"}) @Override protected void doRun() throws Exception { - handler.messageReceived(request, transportChannel); + reg.getHandler().messageReceived(request, transportChannel); } @Override public boolean isForceExecution() { - return handler.isForceExecution(); + return reg.isForceExecution(); } @Override @@ -174,7 +169,7 @@ public class NettyTransportTests extends ElasticsearchIntegrationTest { try { transportChannel.sendResponse(e); } catch (Throwable e1) { - logger.warn("Failed to send error message back to client for action [" + action + "]", e1); + logger.warn("Failed to send error message back to client for action [" + reg.getAction() + "]", e1); logger.warn("Actual Exception", e); } } } diff --git a/src/test/java/org/elasticsearch/update/UpdateNoopTests.java b/src/test/java/org/elasticsearch/update/UpdateNoopTests.java index ab428aab75f..e5c1ee9625e 
100644 --- a/src/test/java/org/elasticsearch/update/UpdateNoopTests.java +++ b/src/test/java/org/elasticsearch/update/UpdateNoopTests.java @@ -197,7 +197,7 @@ public class UpdateNoopTests extends ElasticsearchIntegrationTest { update(false, 2, XContentFactory.jsonBuilder().startObject().endObject()); } - private XContentBuilder fields(Object... fields) throws ElasticsearchException, IOException { + private XContentBuilder fields(Object... fields) throws IOException { assertEquals("Fields must field1, value1, field2, value2, etc", 0, fields.length % 2); XContentBuilder builder = XContentFactory.jsonBuilder().startObject(); diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.4.5.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.4.5.zip new file mode 100644 index 00000000000..da4b85f084e Binary files /dev/null and b/src/test/resources/org/elasticsearch/bwcompat/index-1.4.5.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/index-1.5.2.zip b/src/test/resources/org/elasticsearch/bwcompat/index-1.5.2.zip new file mode 100644 index 00000000000..c7bae64bd25 Binary files /dev/null and b/src/test/resources/org/elasticsearch/bwcompat/index-1.5.2.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.5.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.5.zip new file mode 100644 index 00000000000..8a0c271c417 Binary files /dev/null and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.4.5.zip differ diff --git a/src/test/resources/org/elasticsearch/bwcompat/repo-1.5.2.zip b/src/test/resources/org/elasticsearch/bwcompat/repo-1.5.2.zip new file mode 100644 index 00000000000..affcfb6e9ed Binary files /dev/null and b/src/test/resources/org/elasticsearch/bwcompat/repo-1.5.2.zip differ diff --git a/src/test/resources/org/elasticsearch/plugins/dummy/_site/dir1/.empty b/src/test/resources/org/elasticsearch/plugins/dummy/_site/dir1/.empty new file mode 100644 index 00000000000..e69de29bb2d diff --git a/src/test/resources/packaging/scripts/30_deb_package.bats b/src/test/resources/packaging/scripts/30_deb_package.bats index 7130d275d8c..13f2d3275ea 100644 --- a/src/test/resources/packaging/scripts/30_deb_package.bats +++ b/src/test/resources/packaging/scripts/30_deb_package.bats @@ -128,7 +128,6 @@ setup() { # Those directories are deleted when removing the package # see postrm file assert_file_not_exist "/var/log/elasticsearch" - assert_file_not_exist "/tmp/elasticsearch" assert_file_not_exist "/usr/share/elasticsearch/plugins" assert_file_not_exist "/var/run/elasticsearch" diff --git a/src/test/resources/packaging/scripts/40_rpm_package.bats b/src/test/resources/packaging/scripts/40_rpm_package.bats index 6be482867f9..af9692fa903 100644 --- a/src/test/resources/packaging/scripts/40_rpm_package.bats +++ b/src/test/resources/packaging/scripts/40_rpm_package.bats @@ -126,7 +126,6 @@ setup() { # Those directories are deleted when removing the package # see postrm file assert_file_not_exist "/var/log/elasticsearch" - assert_file_not_exist "/tmp/elasticsearch" assert_file_not_exist "/usr/share/elasticsearch/plugins" assert_file_not_exist "/var/run/elasticsearch" diff --git a/src/test/resources/packaging/scripts/packaging_test_utils.bash b/src/test/resources/packaging/scripts/packaging_test_utils.bash index 6ef1874c4ab..0033e240ee5 100644 --- a/src/test/resources/packaging/scripts/packaging_test_utils.bash +++ b/src/test/resources/packaging/scripts/packaging_test_utils.bash @@ -192,8 +192,6 @@ verify_package_installation() { 
assert_file "/var/lib/elasticsearch" d elasticsearch 755 # Log dir assert_file "/var/log/elasticsearch" d elasticsearch 755 - # Work dir - assert_file "/tmp/elasticsearch" d elasticsearch 755 # Plugins dir assert_file "/usr/share/elasticsearch/plugins" d elasticsearch 755 # PID dir