Merge branch 'master' into ccr

* master: (31 commits)
  [TEST] Fix `GeoShapeQueryTests#testPointsOnly` failure
  Transition transport apis to use void listeners (#27440)
  AwaitsFix GeoShapeQueryTests#testPointsOnly #27454
  Bump test version after backport
  Ensure nested documents have consistent version and seq_ids (#27455)
  Tests: Add Fedora-27 to packaging tests
  Delete some seemingly unused exceptions (#27439)
  #26800: Fix docs rendering
  Remove config prompting for secrets and text (#27216)
  Move the CLI into its own subproject (#27114)
  Correct usage of "an" to "a" in getting started docs
  Avoid NPE when getting build information
  Removes BWC snapshot status handler used in 6.x (#27443)
  Remove manual tracking of registered channels (#27445)
  Remove parameters on HandshakeResponseHandler (#27444)
  [GEO] fix pointsOnly bug for MULTIPOINT
  Standardize underscore requirements in parameters (#27414)
  peanut butter hamburgers
  Log primary-replica resync failures
  Uses TransportMasterNodeAction to update shard snapshot status (#27165)
  ...
Commit 58591f2d16 by Jason Tedor, 2017-11-20 13:17:20 -05:00
120 changed files with 1588 additions and 1141 deletions


@ -208,8 +208,7 @@ In order to create a distribution, simply run the @gradle assemble@ command in t
The distribution for each project will be created under the @build/distributions@ directory in that project.
See the "TESTING":TESTING.asciidoc file for more information about
running the Elasticsearch test suite.
See the "TESTING":TESTING.asciidoc file for more information about running the Elasticsearch test suite.
h3. Upgrading from Elasticsearch 1.x?


@ -351,8 +351,8 @@ These are the linux flavors the Vagrantfile currently supports:
* debian-9 aka stretch, the current debian stable distribution
* centos-6
* centos-7
* fedora-25
* fedora-26
* fedora-27
* oel-6 aka Oracle Enterprise Linux 6
* oel-7 aka Oracle Enterprise Linux 7
* sles-12
@ -428,23 +428,23 @@ sudo -E bats $BATS_TESTS/*.bats
You can also use Gradle to prepare the test environment and then start a single VM:
-------------------------------------------------
gradle vagrantFedora25#up
gradle vagrantFedora27#up
-------------------------------------------------
Or any of vagrantCentos6#up, vagrantCentos7#up, vagrantDebian8#up,
vagrantFedora25#up, vagrantOel6#up, vagrantOel7#up, vagrantOpensuse13#up,
vagrantSles12#up, vagrantUbuntu1404#up, vagrantUbuntu1604#up.
vagrantDebian9#up, vagrantFedora26#up, vagrantFedora27#up, vagrantOel6#up, vagrantOel7#up,
vagrantOpensuse42#up, vagrantSles12#up, vagrantUbuntu1404#up, vagrantUbuntu1604#up.
Once up, you can then connect to the VM using SSH from the elasticsearch directory:
-------------------------------------------------
vagrant ssh fedora-25
vagrant ssh fedora-27
-------------------------------------------------
Or from another directory:
-------------------------------------------------
VAGRANT_CWD=/path/to/elasticsearch vagrant ssh fedora-25
VAGRANT_CWD=/path/to/elasticsearch vagrant ssh fedora-27
-------------------------------------------------
Note: Starting vagrant VM outside of the elasticsearch folder requires to

Vagrantfile

@ -60,14 +60,14 @@ Vagrant.configure(2) do |config|
config.vm.box = "elastic/oraclelinux-7-x86_64"
rpm_common config
end
config.vm.define "fedora-25" do |config|
config.vm.box = "elastic/fedora-25-x86_64"
dnf_common config
end
config.vm.define "fedora-26" do |config|
config.vm.box = "elastic/fedora-26-x86_64"
dnf_common config
end
config.vm.define "fedora-27" do |config|
config.vm.box = "elastic/fedora-27-x86_64"
dnf_common config
end
config.vm.define "opensuse-42" do |config|
config.vm.box = "elastic/opensuse-42-x86_64"
opensuse_common config


@ -81,6 +81,7 @@ List<Version> versions = []
// keep track of the previous major version's last minor, so we know where wire compat begins
int prevMinorIndex = -1 // index in the versions list of the last minor from the prev major
int lastPrevMinor = -1 // the minor version number from the prev major we most recently seen
int prevBugfixIndex = -1 // index in the versions list of the last bugfix release from the prev major
for (String line : versionLines) {
/* Note that this skips alphas and betas which is fine because they aren't
* compatible with anything. */
@ -108,12 +109,19 @@ for (String line : versionLines) {
lastPrevMinor = minor
}
}
if (major == prevMajor) {
prevBugfixIndex = versions.size() - 1
}
}
}
if (versions.toSorted { it.id } != versions) {
println "Versions: ${versions}"
throw new GradleException("Versions.java contains out of order version constants")
}
if (prevBugfixIndex != -1) {
versions[prevBugfixIndex] = new Version(versions[prevBugfixIndex].major, versions[prevBugfixIndex].minor,
versions[prevBugfixIndex].bugfix, versions[prevBugfixIndex].suffix, true)
}
if (currentVersion.bugfix == 0) {
// If on a release branch, after the initial release of that branch, the bugfix version will
// be bumped, and will be != 0. On master and N.x branches, we want to test against the
@ -223,6 +231,7 @@ subprojects {
"org.elasticsearch.gradle:build-tools:${version}": ':build-tools',
"org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec',
"org.elasticsearch:elasticsearch:${version}": ':core',
"org.elasticsearch:elasticsearch-cli:${version}": ':core:cli',
"org.elasticsearch.client:elasticsearch-rest-client:${version}": ':client:rest',
"org.elasticsearch.client:elasticsearch-rest-client-sniffer:${version}": ':client:sniffer',
"org.elasticsearch.client:elasticsearch-rest-high-level-client:${version}": ':client:rest-high-level',
@ -262,6 +271,11 @@ subprojects {
ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-release-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-release-snapshot'
}
} else if (indexCompatVersions[-2].snapshot) {
/* This is a terrible hack for the bump to 6.0.1 which will be fixed by #27397 */
ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
}
project.afterEvaluate {
configurations.all {


@ -19,8 +19,8 @@ class VagrantTestPlugin implements Plugin<Project> {
'centos-7',
'debian-8',
'debian-9',
'fedora-25',
'fedora-26',
'fedora-27',
'oel-6',
'oel-7',
'opensuse-42',


@ -191,23 +191,23 @@ public final class Request {
metadata.field("_id", request.id());
}
if (Strings.hasLength(request.routing())) {
metadata.field("_routing", request.routing());
metadata.field("routing", request.routing());
}
if (Strings.hasLength(request.parent())) {
metadata.field("_parent", request.parent());
metadata.field("parent", request.parent());
}
if (request.version() != Versions.MATCH_ANY) {
metadata.field("_version", request.version());
metadata.field("version", request.version());
}
VersionType versionType = request.versionType();
if (versionType != VersionType.INTERNAL) {
if (versionType == VersionType.EXTERNAL) {
metadata.field("_version_type", "external");
metadata.field("version_type", "external");
} else if (versionType == VersionType.EXTERNAL_GTE) {
metadata.field("_version_type", "external_gte");
metadata.field("version_type", "external_gte");
} else if (versionType == VersionType.FORCE) {
metadata.field("_version_type", "force");
metadata.field("version_type", "force");
}
}
@ -219,7 +219,7 @@ public final class Request {
} else if (opType == DocWriteRequest.OpType.UPDATE) {
UpdateRequest updateRequest = (UpdateRequest) request;
if (updateRequest.retryOnConflict() > 0) {
metadata.field("_retry_on_conflict", updateRequest.retryOnConflict());
metadata.field("retry_on_conflict", updateRequest.retryOnConflict());
}
if (updateRequest.fetchSource() != null) {
metadata.field("_source", updateRequest.fetchSource());


@ -58,7 +58,7 @@ dependencies {
compile 'org.elasticsearch:securesm:1.1'
// utilities
compile 'net.sf.jopt-simple:jopt-simple:5.0.2'
compile "org.elasticsearch:elasticsearch-cli:${version}"
compile 'com.carrotsearch:hppc:0.7.1'
// time handling, remove with java 8 time
@ -265,6 +265,12 @@ if (JavaVersion.current() > JavaVersion.VERSION_1_8) {
dependencyLicenses {
mapping from: /lucene-.*/, to: 'lucene'
mapping from: /jackson-.*/, to: 'jackson'
dependencies = project.configurations.runtime.fileCollection {
it.group.startsWith('org.elasticsearch') == false ||
// keep the following org.elasticsearch jars in
(it.name == 'jna' ||
it.name == 'securesm')
}
}
if (isEclipse == false || project.path == ":core-tests") {

core/cli/build.gradle (new file)

@ -0,0 +1,36 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.elasticsearch.gradle.precommit.PrecommitTasks
apply plugin: 'elasticsearch.build'
archivesBaseName = 'elasticsearch-cli'
dependencies {
compile 'net.sf.jopt-simple:jopt-simple:5.0.2'
}
test.enabled = false
// Since CLI does not depend on :core, it cannot run the jarHell task
jarHell.enabled = false
forbiddenApisMain {
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
}


@ -23,11 +23,6 @@ import joptsimple.OptionException;
import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import org.apache.logging.log4j.Level;
import org.apache.lucene.util.SetOnce;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.settings.Settings;
import java.io.Closeable;
import java.io.IOException;
@ -55,12 +50,13 @@ public abstract class Command implements Closeable {
this.description = description;
}
final SetOnce<Thread> shutdownHookThread = new SetOnce<>();
private Thread shutdownHookThread;
/** Parses options for this command from args and executes it. */
public final int main(String[] args, Terminal terminal) throws Exception {
if (addShutdownHook()) {
shutdownHookThread.set(new Thread(() -> {
shutdownHookThread = new Thread(() -> {
try {
this.close();
} catch (final IOException e) {
@ -75,16 +71,11 @@ public abstract class Command implements Closeable {
throw new AssertionError(impossible);
}
}
}));
Runtime.getRuntime().addShutdownHook(shutdownHookThread.get());
});
Runtime.getRuntime().addShutdownHook(shutdownHookThread);
}
if (shouldConfigureLoggingWithoutConfig()) {
// initialize default for es.logger.level because we will not read the log4j2.properties
final String loggerLevel = System.getProperty("es.logger.level", Level.INFO.name());
final Settings settings = Settings.builder().put("logger.level", loggerLevel).build();
LogConfigurator.configureWithoutConfig(settings);
}
beforeExecute();
try {
mainWithoutErrorHandling(args, terminal);
@ -103,14 +94,10 @@ public abstract class Command implements Closeable {
}
/**
* Indicate whether or not logging should be configured without reading a log4j2.properties. Most commands should do this because we do
* not configure logging for CLI tools. Only commands that configure logging on their own should not do this.
*
* @return true if logging should be configured without reading a log4j2.properties file
* Setup method to be executed before parsing or execution of the command being run. Any exceptions thrown by the
* method will not be cleanly caught by the parser.
*/
protected boolean shouldConfigureLoggingWithoutConfig() {
return true;
}
protected void beforeExecute() {}
/**
* Executes the command, but all errors are thrown.
@ -166,6 +153,11 @@ public abstract class Command implements Closeable {
return true;
}
/** Gets the shutdown hook thread if it exists **/
Thread getShutdownHookThread() {
return shutdownHookThread;
}
@Override
public void close() throws IOException {


@ -0,0 +1,34 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cli;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
* Annotation to suppress forbidden-apis errors inside a whole class, a method, or a field.
*/
@Retention(RetentionPolicy.CLASS)
@Target({ ElementType.CONSTRUCTOR, ElementType.FIELD, ElementType.METHOD, ElementType.TYPE })
public @interface SuppressForbidden {
String reason();
}
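A hypothetical usage sketch for the new cli-local annotation (the helper method and reason below are made up; forbidden-apis ordinarily flags direct use of System.out):
-------------------------------------------------
package org.elasticsearch.cli;

import java.io.PrintStream;

class ForbiddenApisUsageSketch {
    // the mandatory reason documents why the forbidden API is acceptable at this call site
    @SuppressForbidden(reason = "CLI tools write directly to the console")
    static PrintStream stdout() {
        return System.out;
    }
}
-------------------------------------------------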


@ -19,8 +19,6 @@
package org.elasticsearch.cli;
import org.elasticsearch.common.SuppressForbidden;
import java.io.BufferedReader;
import java.io.Console;
import java.io.IOException;


@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
import java.net.URL;
import java.security.CodeSource;
import java.util.jar.JarInputStream;
import java.util.jar.Manifest;
@ -45,8 +46,8 @@ public class Build {
final boolean isSnapshot;
final String esPrefix = "elasticsearch-" + Version.CURRENT;
final URL url = getElasticsearchCodebase();
final String urlStr = url.toString();
final URL url = getElasticsearchCodeSourceLocation();
final String urlStr = url == null ? "" : url.toString();
if (urlStr.startsWith("file:/") && (urlStr.endsWith(esPrefix + ".jar") || urlStr.endsWith(esPrefix + "-SNAPSHOT.jar"))) {
try (JarInputStream jar = new JarInputStream(FileSystemUtils.openFileURLStream(url))) {
Manifest manifest = jar.getManifest();
@ -88,10 +89,13 @@ public class Build {
private final boolean isSnapshot;
/**
* Returns path to elasticsearch codebase path
* The location of the code source for Elasticsearch
*
* @return the location of the code source for Elasticsearch which may be null
*/
static URL getElasticsearchCodebase() {
return Build.class.getProtectionDomain().getCodeSource().getLocation();
static URL getElasticsearchCodeSourceLocation() {
final CodeSource codeSource = Build.class.getProtectionDomain().getCodeSource();
return codeSource == null ? null : codeSource.getLocation();
}
private final String shortHash;


@ -28,6 +28,11 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.monitor.jvm.JvmInfo;
import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
public class Version implements Comparable<Version> {
/*
@ -123,6 +128,9 @@ public class Version implements Comparable<Version> {
public static final int V_6_0_0_ID = 6000099;
public static final Version V_6_0_0 =
new Version(V_6_0_0_ID, org.apache.lucene.util.Version.LUCENE_7_0_1);
public static final int V_6_0_1_ID = 6000199;
public static final Version V_6_0_1 =
new Version(V_6_0_1_ID, org.apache.lucene.util.Version.LUCENE_7_0_1);
public static final int V_6_1_0_ID = 6010099;
public static final Version V_6_1_0 =
new Version(V_6_1_0_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
@ -131,8 +139,6 @@ public class Version implements Comparable<Version> {
new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_1_0);
public static final Version CURRENT = V_7_0_0_alpha1;
// unreleased versions must be added to the above list with the suffix _UNRELEASED (with the exception of CURRENT)
static {
assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["
+ org.apache.lucene.util.Version.LATEST + "] is still set to [" + CURRENT.luceneVersion + "]";
@ -148,6 +154,8 @@ public class Version implements Comparable<Version> {
return V_7_0_0_alpha1;
case V_6_1_0_ID:
return V_6_1_0;
case V_6_0_1_ID:
return V_6_0_1;
case V_6_0_0_ID:
return V_6_0_0;
case V_6_0_0_rc2_ID:
@ -363,19 +371,23 @@ public class Version implements Comparable<Version> {
* is a beta or RC release then the version itself is returned.
*/
public Version minimumCompatibilityVersion() {
final int bwcMajor;
final int bwcMinor;
// TODO: remove this entirely, making it static for each version
if (major == 6) { // we only specialize for current major here
bwcMajor = Version.V_5_6_0.major;
bwcMinor = Version.V_5_6_0.minor;
} else if (major == 7) { // we only specialize for current major here
return V_6_1_0;
} else {
bwcMajor = major;
bwcMinor = 0;
if (major >= 6) {
// all major versions from 6 onwards are compatible with last minor series of the previous major
final List<Version> declaredVersions = getDeclaredVersions(getClass());
Version bwcVersion = null;
for (int i = declaredVersions.size() - 1; i >= 0; i--) {
final Version candidateVersion = declaredVersions.get(i);
if (candidateVersion.major == major - 1 && candidateVersion.isRelease() && after(candidateVersion)) {
if (bwcVersion != null && candidateVersion.minor < bwcVersion.minor) {
break;
}
bwcVersion = candidateVersion;
}
}
return bwcVersion == null ? this : bwcVersion;
}
return Version.min(this, fromId(bwcMajor * 1000000 + bwcMinor * 10000 + 99));
return Version.min(this, fromId((int) major * 1000000 + 0 * 10000 + 99));
}
/**
@ -485,4 +497,34 @@ public class Version implements Comparable<Version> {
public boolean isRelease() {
return build == 99;
}
/**
* Extracts a sorted list of declared version constants from a class.
* The argument would normally be Version.class but is exposed for
* testing with other classes-containing-version-constants.
*/
public static List<Version> getDeclaredVersions(final Class<?> versionClass) {
final Field[] fields = versionClass.getFields();
final List<Version> versions = new ArrayList<>(fields.length);
for (final Field field : fields) {
final int mod = field.getModifiers();
if (false == Modifier.isStatic(mod) && Modifier.isFinal(mod) && Modifier.isPublic(mod)) {
continue;
}
if (field.getType() != Version.class) {
continue;
}
if ("CURRENT".equals(field.getName())) {
continue;
}
assert field.getName().matches("V(_\\d+)+(_(alpha|beta|rc)\\d+)?") : field.getName();
try {
versions.add(((Version) field.get(null)));
} catch (final IllegalAccessException e) {
throw new RuntimeException(e);
}
}
Collections.sort(versions);
return versions;
}
}
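As a worked example (derived only from the constants and logic shown above, not from the upstream tests): for 7.0.0-alpha1 the declared previous-major constants are 6.0.0, 6.0.1 and 6.1.0; scanning the sorted list backwards, 6.1.0 is the first 6.x release that 7.0.0-alpha1 is after, and the next candidate 6.0.1 drops to a lower minor, so the scan stops and 6.1.0 starts the compatible series:
-------------------------------------------------
import java.util.List;

import org.elasticsearch.Version;

public class MinimumCompatibilityVersionSketch {
    public static void main(String[] args) {
        // run with -ea so the assertions are checked
        List<Version> declared = Version.getDeclaredVersions(Version.class);
        assert declared.contains(Version.V_6_0_1);   // the newly added constant is picked up reflectively
        // 7.0.0-alpha1 is wire-compatible back to the start of the 6.1 minor series
        assert Version.V_7_0_0_alpha1.minimumCompatibilityVersion().equals(Version.V_6_1_0);
    }
}
-------------------------------------------------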


@ -30,6 +30,7 @@ import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
@ -68,6 +69,19 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
private static final int REQUEST_OVERHEAD = 50;
private static final ParseField INDEX = new ParseField("_index");
private static final ParseField TYPE = new ParseField("_type");
private static final ParseField ID = new ParseField("_id");
private static final ParseField ROUTING = new ParseField("routing");
private static final ParseField PARENT = new ParseField("parent");
private static final ParseField OP_TYPE = new ParseField("op_type");
private static final ParseField VERSION = new ParseField("version");
private static final ParseField VERSION_TYPE = new ParseField("version_type");
private static final ParseField RETRY_ON_CONFLICT = new ParseField("retry_on_conflict");
private static final ParseField PIPELINE = new ParseField("pipeline");
private static final ParseField FIELDS = new ParseField("fields");
private static final ParseField SOURCE = new ParseField("_source");
/**
* Requests that are part of this request. It is only possible to add things that are both {@link ActionRequest}s and
* {@link WriteRequest}s to this but java doesn't support syntax to declare that everything in the array has both types so we declare
@ -334,45 +348,45 @@ public class BulkRequest extends ActionRequest implements CompositeIndicesReques
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if ("_index".equals(currentFieldName)) {
if (INDEX.match(currentFieldName)){
if (!allowExplicitIndex) {
throw new IllegalArgumentException("explicit index in bulk is not allowed");
}
index = parser.text();
} else if ("_type".equals(currentFieldName)) {
} else if (TYPE.match(currentFieldName)) {
type = parser.text();
} else if ("_id".equals(currentFieldName)) {
} else if (ID.match(currentFieldName)) {
id = parser.text();
} else if ("_routing".equals(currentFieldName) || "routing".equals(currentFieldName)) {
} else if (ROUTING.match(currentFieldName)) {
routing = parser.text();
} else if ("_parent".equals(currentFieldName) || "parent".equals(currentFieldName)) {
} else if (PARENT.match(currentFieldName)) {
parent = parser.text();
} else if ("op_type".equals(currentFieldName) || "opType".equals(currentFieldName)) {
} else if (OP_TYPE.match(currentFieldName)) {
opType = parser.text();
} else if ("_version".equals(currentFieldName) || "version".equals(currentFieldName)) {
} else if (VERSION.match(currentFieldName)) {
version = parser.longValue();
} else if ("_version_type".equals(currentFieldName) || "_versionType".equals(currentFieldName) || "version_type".equals(currentFieldName) || "versionType".equals(currentFieldName)) {
} else if (VERSION_TYPE.match(currentFieldName)) {
versionType = VersionType.fromString(parser.text());
} else if ("_retry_on_conflict".equals(currentFieldName) || "_retryOnConflict".equals(currentFieldName)) {
} else if (RETRY_ON_CONFLICT.match(currentFieldName)) {
retryOnConflict = parser.intValue();
} else if ("pipeline".equals(currentFieldName)) {
} else if (PIPELINE.match(currentFieldName)) {
pipeline = parser.text();
} else if ("fields".equals(currentFieldName)) {
} else if (FIELDS.match(currentFieldName)) {
throw new IllegalArgumentException("Action/metadata line [" + line + "] contains a simple value for parameter [fields] while a list is expected");
} else if ("_source".equals(currentFieldName)) {
} else if (SOURCE.match(currentFieldName)) {
fetchSourceContext = FetchSourceContext.fromXContent(parser);
} else {
throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]");
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("fields".equals(currentFieldName)) {
if (FIELDS.match(currentFieldName)) {
DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead");
List<Object> values = parser.list();
fields = values.toArray(new String[values.size()]);
} else {
throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
}
} else if (token == XContentParser.Token.START_OBJECT && "_source".equals(currentFieldName)) {
} else if (token == XContentParser.Token.START_OBJECT && SOURCE.match(currentFieldName)) {
fetchSourceContext = FetchSourceContext.fromXContent(parser);
} else if (token != XContentParser.Token.VALUE_NULL) {
throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
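On the parsing side the new constants use the single-argument ParseField constructor, so the old underscore spellings ("_routing", "_version", "_retry_on_conflict", ...) are no longer accepted in bulk action metadata. A small sketch of the ParseField behaviour this relies on (values are illustrative); a rename that wanted to stay lenient could instead register the old spelling as a deprecated alias, which this change deliberately does not do:
-------------------------------------------------
import org.elasticsearch.common.ParseField;

public class ParseFieldSketch {
    // strict form, as used in BulkRequest above: only "routing" matches
    private static final ParseField ROUTING = new ParseField("routing");

    // alternative (not what this change does): keep accepting the old spelling,
    // logging a deprecation warning when it is used
    private static final ParseField ROUTING_LENIENT = new ParseField("routing", "_routing");

    public static void main(String[] args) {
        // run with -ea so the assertions are checked
        assert ROUTING.match("routing");
        assert ROUTING.match("_routing") == false;
        assert ROUTING_LENIENT.match("_routing");
    }
}
-------------------------------------------------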


@ -28,6 +28,7 @@ import org.elasticsearch.action.RealtimeRequest;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
@ -48,6 +49,17 @@ import java.util.Locale;
public class MultiGetRequest extends ActionRequest implements Iterable<MultiGetRequest.Item>, CompositeIndicesRequest, RealtimeRequest {
private static final ParseField INDEX = new ParseField("_index");
private static final ParseField TYPE = new ParseField("_type");
private static final ParseField ID = new ParseField("_id");
private static final ParseField ROUTING = new ParseField("routing");
private static final ParseField PARENT = new ParseField("parent");
private static final ParseField VERSION = new ParseField("version");
private static final ParseField VERSION_TYPE = new ParseField("version_type");
private static final ParseField FIELDS = new ParseField("fields");
private static final ParseField STORED_FIELDS = new ParseField("stored_fields");
private static final ParseField SOURCE = new ParseField("_source");
/**
* A single get item.
*/
@ -379,30 +391,30 @@ public class MultiGetRequest extends ActionRequest implements Iterable<MultiGetR
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if ("_index".equals(currentFieldName)) {
if (INDEX.match(currentFieldName)) {
if (!allowExplicitIndex) {
throw new IllegalArgumentException("explicit index in multi get is not allowed");
}
index = parser.text();
} else if ("_type".equals(currentFieldName)) {
} else if (TYPE.match(currentFieldName)) {
type = parser.text();
} else if ("_id".equals(currentFieldName)) {
} else if (ID.match(currentFieldName)) {
id = parser.text();
} else if ("_routing".equals(currentFieldName) || "routing".equals(currentFieldName)) {
} else if (ROUTING.match(currentFieldName)) {
routing = parser.text();
} else if ("_parent".equals(currentFieldName) || "parent".equals(currentFieldName)) {
} else if (PARENT.match(currentFieldName)) {
parent = parser.text();
} else if ("fields".equals(currentFieldName)) {
} else if (FIELDS.match(currentFieldName)) {
throw new ParsingException(parser.getTokenLocation(),
"Unsupported field [fields] used, expected [stored_fields] instead");
} else if ("stored_fields".equals(currentFieldName)) {
} else if (STORED_FIELDS.match(currentFieldName)) {
storedFields = new ArrayList<>();
storedFields.add(parser.text());
} else if ("_version".equals(currentFieldName) || "version".equals(currentFieldName)) {
} else if (VERSION.match(currentFieldName)) {
version = parser.longValue();
} else if ("_version_type".equals(currentFieldName) || "_versionType".equals(currentFieldName) || "version_type".equals(currentFieldName) || "versionType".equals(currentFieldName)) {
} else if (VERSION_TYPE.match(currentFieldName)) {
versionType = VersionType.fromString(parser.text());
} else if ("_source".equals(currentFieldName)) {
} else if (SOURCE.match(currentFieldName)) {
// check lenient to avoid interpreting the value as string but parse strict in order to provoke an error early on.
if (parser.isBooleanValueLenient()) {
fetchSourceContext = new FetchSourceContext(parser.booleanValue(), fetchSourceContext.includes(),
@ -413,17 +425,19 @@ public class MultiGetRequest extends ActionRequest implements Iterable<MultiGetR
} else {
throw new ElasticsearchParseException("illegal type for _source: [{}]", token);
}
} else {
throw new ElasticsearchParseException("failed to parse multi get request. unknown field [{}]", currentFieldName);
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("fields".equals(currentFieldName)) {
if (FIELDS.match(currentFieldName)) {
throw new ParsingException(parser.getTokenLocation(),
"Unsupported field [fields] used, expected [stored_fields] instead");
} else if ("stored_fields".equals(currentFieldName)) {
} else if (STORED_FIELDS.match(currentFieldName)) {
storedFields = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
storedFields.add(parser.text());
}
} else if ("_source".equals(currentFieldName)) {
} else if (SOURCE.match(currentFieldName)) {
ArrayList<String> includes = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
includes.add(parser.text());
@ -433,7 +447,7 @@ public class MultiGetRequest extends ActionRequest implements Iterable<MultiGetR
}
} else if (token == XContentParser.Token.START_OBJECT) {
if ("_source".equals(currentFieldName)) {
if (SOURCE.match(currentFieldName)) {
List<String> currentList = null, includes = null, excludes = null;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {


@ -18,11 +18,13 @@
*/
package org.elasticsearch.action.resync;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.action.support.replication.ReplicationOperation;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.action.support.replication.TransportWriteAction;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
@ -158,6 +160,15 @@ public class TransportResyncReplicationAction extends TransportWriteAction<Resyn
@Override
public void handleResponse(ResyncReplicationResponse response) {
final ReplicationResponse.ShardInfo.Failure[] failures = response.getShardInfo().getFailures();
// noinspection ForLoopReplaceableByForEach
for (int i = 0; i < failures.length; i++) {
final ReplicationResponse.ShardInfo.Failure f = failures[i];
logger.info(
new ParameterizedMessage(
"{} primary-replica resync to replica on node [{}] failed", f.fullShardId(), f.nodeId()),
f.getCause());
}
listener.onResponse(response);
}


@ -27,6 +27,7 @@ import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.get.MultiGetRequest;
import org.elasticsearch.action.support.single.shard.SingleShardRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
@ -60,6 +61,22 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
*/
public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> implements RealtimeRequest {
private static final ParseField INDEX = new ParseField("_index");
private static final ParseField TYPE = new ParseField("_type");
private static final ParseField ID = new ParseField("_id");
private static final ParseField ROUTING = new ParseField("routing");
private static final ParseField PARENT = new ParseField("parent");
private static final ParseField VERSION = new ParseField("version");
private static final ParseField VERSION_TYPE = new ParseField("version_type");
private static final ParseField FIELDS = new ParseField("fields");
private static final ParseField OFFSETS = new ParseField("offsets");
private static final ParseField POSITIONS = new ParseField("positions");
private static final ParseField PAYLOADS = new ParseField("payloads");
private static final ParseField DFS = new ParseField("dfs");
private static final ParseField FILTER = new ParseField("filter");
private static final ParseField DOC = new ParseField("doc");
private String type;
private String id;
@ -593,7 +610,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (currentFieldName != null) {
if (currentFieldName.equals("fields")) {
if (FIELDS.match(currentFieldName)) {
if (token == XContentParser.Token.START_ARRAY) {
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
fields.add(parser.text());
@ -601,43 +618,43 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
} else {
throw new ElasticsearchParseException("failed to parse term vectors request. field [fields] must be an array");
}
} else if (currentFieldName.equals("offsets")) {
} else if (OFFSETS.match(currentFieldName)) {
termVectorsRequest.offsets(parser.booleanValue());
} else if (currentFieldName.equals("positions")) {
} else if (POSITIONS.match(currentFieldName)) {
termVectorsRequest.positions(parser.booleanValue());
} else if (currentFieldName.equals("payloads")) {
} else if (PAYLOADS.match(currentFieldName)) {
termVectorsRequest.payloads(parser.booleanValue());
} else if (currentFieldName.equals("term_statistics") || currentFieldName.equals("termStatistics")) {
termVectorsRequest.termStatistics(parser.booleanValue());
} else if (currentFieldName.equals("field_statistics") || currentFieldName.equals("fieldStatistics")) {
termVectorsRequest.fieldStatistics(parser.booleanValue());
} else if (currentFieldName.equals("dfs")) {
} else if (DFS.match(currentFieldName)) {
throw new IllegalArgumentException("distributed frequencies is not supported anymore for term vectors");
} else if (currentFieldName.equals("per_field_analyzer") || currentFieldName.equals("perFieldAnalyzer")) {
termVectorsRequest.perFieldAnalyzer(readPerFieldAnalyzer(parser.map()));
} else if (currentFieldName.equals("filter")) {
} else if (FILTER.match(currentFieldName)) {
termVectorsRequest.filterSettings(readFilterSettings(parser));
} else if ("_index".equals(currentFieldName)) { // the following is important for multi request parsing.
} else if (INDEX.match(currentFieldName)) { // the following is important for multi request parsing.
termVectorsRequest.index = parser.text();
} else if ("_type".equals(currentFieldName)) {
} else if (TYPE.match(currentFieldName)) {
termVectorsRequest.type = parser.text();
} else if ("_id".equals(currentFieldName)) {
} else if (ID.match(currentFieldName)) {
if (termVectorsRequest.doc != null) {
throw new ElasticsearchParseException("failed to parse term vectors request. either [id] or [doc] can be specified, but not both!");
}
termVectorsRequest.id = parser.text();
} else if ("doc".equals(currentFieldName)) {
} else if (DOC.match(currentFieldName)) {
if (termVectorsRequest.id != null) {
throw new ElasticsearchParseException("failed to parse term vectors request. either [id] or [doc] can be specified, but not both!");
}
termVectorsRequest.doc(jsonBuilder().copyCurrentStructure(parser));
} else if ("_routing".equals(currentFieldName) || "routing".equals(currentFieldName)) {
} else if (ROUTING.match(currentFieldName)) {
termVectorsRequest.routing = parser.text();
} else if ("_parent".equals(currentFieldName) || "parent".equals(currentFieldName)) {
} else if (PARENT.match(currentFieldName)) {
termVectorsRequest.parent = parser.text();
} else if ("_version".equals(currentFieldName) || "version".equals(currentFieldName)) {
} else if (VERSION.match(currentFieldName)) {
termVectorsRequest.version = parser.longValue();
} else if ("_version_type".equals(currentFieldName) || "_versionType".equals(currentFieldName) || "version_type".equals(currentFieldName) || "versionType".equals(currentFieldName)) {
} else if (VERSION_TYPE.match(currentFieldName)) {
termVectorsRequest.versionType = VersionType.fromString(parser.text());
} else {
throw new ElasticsearchParseException("failed to parse term vectors request. unknown field [{}]", currentFieldName);


@ -30,7 +30,6 @@ import org.apache.lucene.util.IOUtils;
import org.apache.lucene.util.StringHelper;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.common.PidFile;
import org.elasticsearch.common.SuppressForbidden;
@ -245,7 +244,6 @@ final class Bootstrap {
final SecureSettings secureSettings,
final Settings initialSettings,
final Path configPath) {
Terminal terminal = foreground ? Terminal.DEFAULT : null;
Settings.Builder builder = Settings.builder();
if (pidFile != null) {
builder.put(Environment.PIDFILE_SETTING.getKey(), pidFile);
@ -254,7 +252,7 @@ final class Bootstrap {
if (secureSettings != null) {
builder.setSecureSettings(secureSettings);
}
return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, Collections.emptyMap(), configPath);
return InternalSettingsPreparer.prepareEnvironment(builder.build(), Collections.emptyMap(), configPath);
}
private void start() throws NodeValidationException {


@ -22,7 +22,9 @@ package org.elasticsearch.cli;
import joptsimple.OptionSet;
import joptsimple.OptionSpec;
import joptsimple.util.KeyValuePair;
import org.apache.logging.log4j.Level;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.logging.LogConfigurator;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.InternalSettingsPreparer;
@ -66,16 +68,16 @@ public abstract class EnvironmentAwareCommand extends Command {
putSystemPropertyIfSettingIsMissing(settings, "path.home", "es.path.home");
putSystemPropertyIfSettingIsMissing(settings, "path.logs", "es.path.logs");
execute(terminal, options, createEnv(terminal, settings));
execute(terminal, options, createEnv(settings));
}
/** Create an {@link Environment} for the command to use. Overrideable for tests. */
protected Environment createEnv(final Terminal terminal, final Map<String, String> settings) throws UserException {
protected Environment createEnv(final Map<String, String> settings) throws UserException {
final String esPathConf = System.getProperty("es.path.conf");
if (esPathConf == null) {
throw new UserException(ExitCodes.CONFIG, "the system property [es.path.conf] must be set");
}
return InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, terminal, settings, getConfigPath(esPathConf));
return InternalSettingsPreparer.prepareEnvironment(Settings.EMPTY, settings, getConfigPath(esPathConf));
}
@SuppressForbidden(reason = "need path to construct environment")
@ -102,6 +104,26 @@ public abstract class EnvironmentAwareCommand extends Command {
}
}
@Override
protected final void beforeExecute() {
if (shouldConfigureLoggingWithoutConfig()) {
// initialize default for es.logger.level because we will not read the log4j2.properties
final String loggerLevel = System.getProperty("es.logger.level", Level.INFO.name());
final Settings settings = Settings.builder().put("logger.level", loggerLevel).build();
LogConfigurator.configureWithoutConfig(settings);
}
}
/**
* Indicate whether or not logging should be configured without reading a log4j2.properties. Most commands should do this because we do
* not configure logging for CLI tools. Only commands that configure logging on their own should not do this.
*
* @return true if logging should be configured without reading a log4j2.properties file
*/
protected boolean shouldConfigureLoggingWithoutConfig() {
return true;
}
/** Execute the command with the initialized {@link Environment}. */
protected abstract void execute(Terminal terminal, OptionSet options, Environment env) throws Exception;
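The logging bootstrap that used to live in Command now runs here via beforeExecute(), still guarded by shouldConfigureLoggingWithoutConfig(). A hypothetical subclass sketch (the class name, description, and the single-argument constructor are assumptions) of a command that configures log4j2 itself and therefore opts out:
-------------------------------------------------
import joptsimple.OptionSet;

import org.elasticsearch.cli.EnvironmentAwareCommand;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.env.Environment;

class SelfLoggingCommand extends EnvironmentAwareCommand {

    SelfLoggingCommand() {
        super("a command that sets up its own log4j2 configuration");
    }

    @Override
    protected boolean shouldConfigureLoggingWithoutConfig() {
        // this command reads a log4j2.properties on its own, so skip the default bootstrap
        return false;
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options, Environment env) throws Exception {
        terminal.println("running with custom logging");
    }
}
-------------------------------------------------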


@ -118,7 +118,7 @@ public abstract class AllocationDecider extends AbstractComponent {
// On a NO decision, by default, we allow force allocating the primary.
return allocation.decision(Decision.YES,
decision.label(),
"primary shard [{}] allowed to force allocate on node [{}]",
"primary shard [%s] allowed to force allocate on node [%s]",
shardRouting.shardId(), node.nodeId());
} else {
// On a THROTTLE/YES decision, we use the same decision instead of forcing allocation


@ -116,20 +116,20 @@ public class EnableAllocationDecider extends AllocationDecider {
case ALL:
return allocation.decision(Decision.YES, NAME, "all allocations are allowed");
case NONE:
return allocation.decision(Decision.NO, NAME, "no allocations are allowed due to {}", setting(enable, usedIndexSetting));
return allocation.decision(Decision.NO, NAME, "no allocations are allowed due to %s", setting(enable, usedIndexSetting));
case NEW_PRIMARIES:
if (shardRouting.primary() && shardRouting.active() == false &&
shardRouting.recoverySource().getType() != RecoverySource.Type.EXISTING_STORE) {
return allocation.decision(Decision.YES, NAME, "new primary allocations are allowed");
} else {
return allocation.decision(Decision.NO, NAME, "non-new primary allocations are forbidden due to {}",
return allocation.decision(Decision.NO, NAME, "non-new primary allocations are forbidden due to %s",
setting(enable, usedIndexSetting));
}
case PRIMARIES:
if (shardRouting.primary()) {
return allocation.decision(Decision.YES, NAME, "primary allocations are allowed");
} else {
return allocation.decision(Decision.NO, NAME, "replica allocations are forbidden due to {}",
return allocation.decision(Decision.NO, NAME, "replica allocations are forbidden due to %s",
setting(enable, usedIndexSetting));
}
default:
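The placeholder fix matters because, as this change assumes, these explanation strings are rendered with String.format-style formatting rather than logger-style substitution: a "{}" placeholder is kept literally while "%s" picks up the argument.
-------------------------------------------------
import java.util.Locale;

public class PlaceholderSketch {
    public static void main(String[] args) {
        // logger-style placeholder: String.format leaves "{}" untouched and ignores the argument
        System.out.println(String.format(Locale.ROOT,
                "no allocations are allowed due to {}", "cluster setting [cluster.routing.allocation.enable=none]"));
        // %s placeholder: the argument is substituted as intended
        System.out.println(String.format(Locale.ROOT,
                "no allocations are allowed due to %s", "cluster setting [cluster.routing.allocation.enable=none]"));
    }
}
-------------------------------------------------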


@ -1,29 +0,0 @@
/*
* Copyright (C) 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.elasticsearch.common.inject.internal;
/**
* Wraps an exception that occurred during a computation in a different thread.
*
* @author Bob Lee
*/
public class AsynchronousComputationException extends ComputationException {
public AsynchronousComputationException(Throwable cause) {
super(cause);
}
}


@ -1,27 +0,0 @@
/*
* Copyright (C) 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.elasticsearch.common.inject.internal;
/**
* Wraps an exception that occurred during a computation.
*/
public class ComputationException extends RuntimeException {
public ComputationException(Throwable cause) {
super(cause);
}
}


@ -1,30 +0,0 @@
/*
* Copyright (C) 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.elasticsearch.common.inject.internal;
/**
* Thrown when a computer function returns null. This subclass exists so
* that our ReferenceCache adapter can differentiate null output from null
* keys, but we don't want to make this public otherwise.
*
* @author Bob Lee
*/
class NullOutputException extends NullPointerException {
NullOutputException(String s) {
super(s);
}
}


@ -79,7 +79,7 @@ class TimedRunnable extends AbstractRunnable {
// There must have been an exception thrown, the total time is unknown (-1)
return -1;
}
return finishTimeNanos - creationTimeNanos;
return Math.max(finishTimeNanos - creationTimeNanos, 1);
}
/**
@ -91,7 +91,7 @@ class TimedRunnable extends AbstractRunnable {
// There must have been an exception thrown, the total time is unknown (-1)
return -1;
}
return finishTimeNanos - startTimeNanos;
return Math.max(finishTimeNanos - startTimeNanos, 1);
}
}


@ -34,6 +34,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.common.Explicit;
import org.elasticsearch.common.geo.GeoUtils;
import org.elasticsearch.common.geo.SpatialStrategy;
import org.elasticsearch.common.geo.XShapeCollection;
import org.elasticsearch.common.geo.builders.ShapeBuilder;
import org.elasticsearch.common.geo.builders.ShapeBuilder.Orientation;
import org.elasticsearch.common.geo.parsers.ShapeParser;
@ -463,7 +464,6 @@ public class GeoShapeFieldMapper extends FieldMapper {
public GeoShapeFieldType fieldType() {
return (GeoShapeFieldType) super.fieldType();
}
@Override
public Mapper parse(ParseContext context) throws IOException {
try {
@ -475,14 +475,20 @@ public class GeoShapeFieldMapper extends FieldMapper {
}
shape = shapeBuilder.build();
}
if (fieldType().pointsOnly() && !(shape instanceof Point)) {
throw new MapperParsingException("[{" + fieldType().name() + "}] is configured for points only but a " +
((shape instanceof JtsGeometry) ? ((JtsGeometry) shape).getGeom().getGeometryType() : shape.getClass()) + " was found");
}
List<IndexableField> fields = new ArrayList<>(Arrays.asList(fieldType().defaultStrategy().createIndexableFields(shape)));
createFieldNamesField(context, fields);
for (IndexableField field : fields) {
context.doc().add(field);
if (fieldType().pointsOnly() == true) {
// index configured for pointsOnly
if (shape instanceof XShapeCollection && XShapeCollection.class.cast(shape).pointsOnly()) {
// MULTIPOINT data: index each point separately
List<Shape> shapes = ((XShapeCollection) shape).getShapes();
for (Shape s : shapes) {
indexShape(context, s);
}
} else if (shape instanceof Point == false) {
throw new MapperParsingException("[{" + fieldType().name() + "}] is configured for points only but a " +
((shape instanceof JtsGeometry) ? ((JtsGeometry)shape).getGeom().getGeometryType() : shape.getClass()) + " was found");
}
} else {
indexShape(context, shape);
}
} catch (Exception e) {
if (ignoreMalformed.value() == false) {
@ -492,6 +498,14 @@ public class GeoShapeFieldMapper extends FieldMapper {
return null;
}
private void indexShape(ParseContext context, Shape shape) {
List<IndexableField> fields = new ArrayList<>(Arrays.asList(fieldType().defaultStrategy().createIndexableFields(shape)));
createFieldNamesField(context, fields);
for (IndexableField field : fields) {
context.doc().add(field);
}
}
@Override
protected void parseCreateField(ParseContext context, List<IndexableField> fields) throws IOException {
}
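With the fix above, a geo_shape field mapped with "points_only": true now accepts MULTIPOINT data and indexes each point separately instead of failing the whole document. A minimal sketch of such a document built with XContentBuilder (the field name "location" and the coordinates are illustrative):
-------------------------------------------------
import java.io.IOException;

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class PointsOnlyMultiPointSketch {
    public static void main(String[] args) throws IOException {
        // A MULTIPOINT value for a geo_shape field mapped with "points_only": true;
        // after this change each of the two points is indexed on its own.
        XContentBuilder doc = XContentFactory.jsonBuilder();
        doc.startObject();
        doc.startObject("location");
        doc.field("type", "multipoint");
        doc.startArray("coordinates");
        doc.startArray().value(102.0).value(2.0).endArray();
        doc.startArray().value(103.0).value(2.0).endArray();
        doc.endArray();
        doc.endObject();
        doc.endObject();
    }
}
-------------------------------------------------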


@ -245,15 +245,18 @@ public class SeqNoFieldMapper extends MetadataFieldMapper {
@Override
public void postParse(ParseContext context) throws IOException {
// In the case of nested docs, let's fill nested docs with seqNo=1 and
// primaryTerm=0 so that Lucene doesn't write a Bitset for documents
// that don't have the field. This is consistent with the default value
// In the case of nested docs, let's fill nested docs with the original
// so that Lucene doesn't write a Bitset for documents that
// don't have the field. This is consistent with the default value
// for efficiency.
// we share the parent docs fields to ensure good compression
SequenceIDFields seqID = context.seqID();
assert seqID != null;
for (int i = 1; i < context.docs().size(); i++) {
final Document doc = context.docs().get(i);
doc.add(new LongPoint(NAME, 1));
doc.add(new NumericDocValuesField(NAME, 1L));
doc.add(new NumericDocValuesField(PRIMARY_TERM_NAME, 0L));
doc.add(seqID.seqNo);
doc.add(seqID.seqNoDocValue);
doc.add(seqID.primaryTerm);
}
}


@ -126,9 +126,11 @@ public class VersionFieldMapper extends MetadataFieldMapper {
public void postParse(ParseContext context) throws IOException {
// In the case of nested docs, let's fill nested docs with version=1 so that Lucene doesn't write a Bitset for documents
// that don't have the field. This is consistent with the default value for efficiency.
Field version = context.version();
assert version != null;
for (int i = 1; i < context.docs().size(); i++) {
final Document doc = context.docs().get(i);
doc.add(new NumericDocValuesField(NAME, 1L));
doc.add(version);
}
}


@ -92,23 +92,31 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
private static final Set<Class<? extends MappedFieldType>> SUPPORTED_FIELD_TYPES = new HashSet<>(
Arrays.asList(TextFieldType.class, KeywordFieldType.class));
private interface Field {
ParseField FIELDS = new ParseField("fields");
ParseField LIKE = new ParseField("like");
ParseField UNLIKE = new ParseField("unlike");
ParseField MAX_QUERY_TERMS = new ParseField("max_query_terms");
ParseField MIN_TERM_FREQ = new ParseField("min_term_freq");
ParseField MIN_DOC_FREQ = new ParseField("min_doc_freq");
ParseField MAX_DOC_FREQ = new ParseField("max_doc_freq");
ParseField MIN_WORD_LENGTH = new ParseField("min_word_length");
ParseField MAX_WORD_LENGTH = new ParseField("max_word_length");
ParseField STOP_WORDS = new ParseField("stop_words");
ParseField ANALYZER = new ParseField("analyzer");
ParseField MINIMUM_SHOULD_MATCH = new ParseField("minimum_should_match");
ParseField BOOST_TERMS = new ParseField("boost_terms");
ParseField INCLUDE = new ParseField("include");
ParseField FAIL_ON_UNSUPPORTED_FIELD = new ParseField("fail_on_unsupported_field");
}
private static final ParseField FIELDS = new ParseField("fields");
private static final ParseField LIKE = new ParseField("like");
private static final ParseField UNLIKE = new ParseField("unlike");
private static final ParseField MAX_QUERY_TERMS = new ParseField("max_query_terms");
private static final ParseField MIN_TERM_FREQ = new ParseField("min_term_freq");
private static final ParseField MIN_DOC_FREQ = new ParseField("min_doc_freq");
private static final ParseField MAX_DOC_FREQ = new ParseField("max_doc_freq");
private static final ParseField MIN_WORD_LENGTH = new ParseField("min_word_length");
private static final ParseField MAX_WORD_LENGTH = new ParseField("max_word_length");
private static final ParseField STOP_WORDS = new ParseField("stop_words");
private static final ParseField ANALYZER = new ParseField("analyzer");
private static final ParseField MINIMUM_SHOULD_MATCH = new ParseField("minimum_should_match");
private static final ParseField BOOST_TERMS = new ParseField("boost_terms");
private static final ParseField INCLUDE = new ParseField("include");
private static final ParseField FAIL_ON_UNSUPPORTED_FIELD = new ParseField("fail_on_unsupported_field");
private static final ParseField INDEX = new ParseField("_index");
private static final ParseField TYPE = new ParseField("_type");
private static final ParseField ID = new ParseField("_id");
public static final ParseField DOC = new ParseField("doc");
private static final ParseField PER_FIELD_ANALYZER = new ParseField("per_field_analyzer");
private static final ParseField ROUTING = new ParseField("routing");
private static final ParseField VERSION = new ParseField("version");
private static final ParseField VERSION_TYPE = new ParseField("version_type");
// document inputs
private final String[] fields;
@ -141,18 +149,6 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
public static final class Item implements ToXContentObject, Writeable {
public static final Item[] EMPTY_ARRAY = new Item[0];
public interface Field {
ParseField INDEX = new ParseField("_index");
ParseField TYPE = new ParseField("_type");
ParseField ID = new ParseField("_id");
ParseField DOC = new ParseField("doc");
ParseField FIELDS = new ParseField("fields");
ParseField PER_FIELD_ANALYZER = new ParseField("per_field_analyzer");
ParseField ROUTING = new ParseField("_routing");
ParseField VERSION = new ParseField("_version");
ParseField VERSION_TYPE = new ParseField("_version_type");
}
private String index;
private String type;
private String id;
@ -370,16 +366,16 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (currentFieldName != null) {
if (Field.INDEX.match(currentFieldName)) {
if (INDEX.match(currentFieldName)) {
item.index = parser.text();
} else if (Field.TYPE.match(currentFieldName)) {
} else if (TYPE.match(currentFieldName)) {
item.type = parser.text();
} else if (Field.ID.match(currentFieldName)) {
} else if (ID.match(currentFieldName)) {
item.id = parser.text();
} else if (Field.DOC.match(currentFieldName)) {
} else if (DOC.match(currentFieldName)) {
item.doc = jsonBuilder().copyCurrentStructure(parser).bytes();
item.xContentType = XContentType.JSON;
} else if (Field.FIELDS.match(currentFieldName)) {
} else if (FIELDS.match(currentFieldName)) {
if (token == XContentParser.Token.START_ARRAY) {
List<String> fields = new ArrayList<>();
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
@ -390,14 +386,13 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
throw new ElasticsearchParseException(
"failed to parse More Like This item. field [fields] must be an array");
}
} else if (Field.PER_FIELD_ANALYZER.match(currentFieldName)) {
} else if (PER_FIELD_ANALYZER.match(currentFieldName)) {
item.perFieldAnalyzer(TermVectorsRequest.readPerFieldAnalyzer(parser.map()));
} else if ("_routing".equals(currentFieldName) || "routing".equals(currentFieldName)) {
} else if (ROUTING.match(currentFieldName)) {
item.routing = parser.text();
} else if ("_version".equals(currentFieldName) || "version".equals(currentFieldName)) {
} else if (VERSION.match(currentFieldName)) {
item.version = parser.longValue();
} else if ("_version_type".equals(currentFieldName) || "_versionType".equals(currentFieldName)
|| "version_type".equals(currentFieldName) || "versionType".equals(currentFieldName)) {
} else if (VERSION_TYPE.match(currentFieldName)) {
item.versionType = VersionType.fromString(parser.text());
} else {
throw new ElasticsearchParseException(
@ -420,31 +415,31 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
if (this.index != null) {
builder.field(Field.INDEX.getPreferredName(), this.index);
builder.field(INDEX.getPreferredName(), this.index);
}
if (this.type != null) {
builder.field(Field.TYPE.getPreferredName(), this.type);
builder.field(TYPE.getPreferredName(), this.type);
}
if (this.id != null) {
builder.field(Field.ID.getPreferredName(), this.id);
builder.field(ID.getPreferredName(), this.id);
}
if (this.doc != null) {
builder.rawField(Field.DOC.getPreferredName(), this.doc, xContentType);
builder.rawField(DOC.getPreferredName(), this.doc, xContentType);
}
if (this.fields != null) {
builder.array(Field.FIELDS.getPreferredName(), this.fields);
builder.array(FIELDS.getPreferredName(), this.fields);
}
if (this.perFieldAnalyzer != null) {
builder.field(Field.PER_FIELD_ANALYZER.getPreferredName(), this.perFieldAnalyzer);
builder.field(PER_FIELD_ANALYZER.getPreferredName(), this.perFieldAnalyzer);
}
if (this.routing != null) {
builder.field(Field.ROUTING.getPreferredName(), this.routing);
builder.field(ROUTING.getPreferredName(), this.routing);
}
if (this.version != Versions.MATCH_ANY) {
builder.field(Field.VERSION.getPreferredName(), this.version);
builder.field(VERSION.getPreferredName(), this.version);
}
if (this.versionType != VersionType.INTERNAL) {
builder.field(Field.VERSION_TYPE.getPreferredName(), this.versionType.toString().toLowerCase(Locale.ROOT));
builder.field(VERSION_TYPE.getPreferredName(), this.versionType.toString().toLowerCase(Locale.ROOT));
}
return builder.endObject();
}
@ -781,26 +776,26 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
protected void doXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(NAME);
if (fields != null) {
builder.array(Field.FIELDS.getPreferredName(), fields);
builder.array(FIELDS.getPreferredName(), fields);
}
buildLikeField(builder, Field.LIKE.getPreferredName(), likeTexts, likeItems);
buildLikeField(builder, Field.UNLIKE.getPreferredName(), unlikeTexts, unlikeItems);
builder.field(Field.MAX_QUERY_TERMS.getPreferredName(), maxQueryTerms);
builder.field(Field.MIN_TERM_FREQ.getPreferredName(), minTermFreq);
builder.field(Field.MIN_DOC_FREQ.getPreferredName(), minDocFreq);
builder.field(Field.MAX_DOC_FREQ.getPreferredName(), maxDocFreq);
builder.field(Field.MIN_WORD_LENGTH.getPreferredName(), minWordLength);
builder.field(Field.MAX_WORD_LENGTH.getPreferredName(), maxWordLength);
buildLikeField(builder, LIKE.getPreferredName(), likeTexts, likeItems);
buildLikeField(builder, UNLIKE.getPreferredName(), unlikeTexts, unlikeItems);
builder.field(MAX_QUERY_TERMS.getPreferredName(), maxQueryTerms);
builder.field(MIN_TERM_FREQ.getPreferredName(), minTermFreq);
builder.field(MIN_DOC_FREQ.getPreferredName(), minDocFreq);
builder.field(MAX_DOC_FREQ.getPreferredName(), maxDocFreq);
builder.field(MIN_WORD_LENGTH.getPreferredName(), minWordLength);
builder.field(MAX_WORD_LENGTH.getPreferredName(), maxWordLength);
if (stopWords != null) {
builder.array(Field.STOP_WORDS.getPreferredName(), stopWords);
builder.array(STOP_WORDS.getPreferredName(), stopWords);
}
if (analyzer != null) {
builder.field(Field.ANALYZER.getPreferredName(), analyzer);
builder.field(ANALYZER.getPreferredName(), analyzer);
}
builder.field(Field.MINIMUM_SHOULD_MATCH.getPreferredName(), minimumShouldMatch);
builder.field(Field.BOOST_TERMS.getPreferredName(), boostTerms);
builder.field(Field.INCLUDE.getPreferredName(), include);
builder.field(Field.FAIL_ON_UNSUPPORTED_FIELD.getPreferredName(), failOnUnsupportedField);
builder.field(MINIMUM_SHOULD_MATCH.getPreferredName(), minimumShouldMatch);
builder.field(BOOST_TERMS.getPreferredName(), boostTerms);
builder.field(INCLUDE.getPreferredName(), include);
builder.field(FAIL_ON_UNSUPPORTED_FIELD.getPreferredName(), failOnUnsupportedField);
printBoostAndQueryName(builder);
builder.endObject();
}
@ -839,31 +834,31 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token.isValue()) {
if (Field.LIKE.match(currentFieldName)) {
if (LIKE.match(currentFieldName)) {
parseLikeField(parser, likeTexts, likeItems);
} else if (Field.UNLIKE.match(currentFieldName)) {
} else if (UNLIKE.match(currentFieldName)) {
parseLikeField(parser, unlikeTexts, unlikeItems);
} else if (Field.MAX_QUERY_TERMS.match(currentFieldName)) {
} else if (MAX_QUERY_TERMS.match(currentFieldName)) {
maxQueryTerms = parser.intValue();
} else if (Field.MIN_TERM_FREQ.match(currentFieldName)) {
} else if (MIN_TERM_FREQ.match(currentFieldName)) {
minTermFreq = parser.intValue();
} else if (Field.MIN_DOC_FREQ.match(currentFieldName)) {
} else if (MIN_DOC_FREQ.match(currentFieldName)) {
minDocFreq = parser.intValue();
} else if (Field.MAX_DOC_FREQ.match(currentFieldName)) {
} else if (MAX_DOC_FREQ.match(currentFieldName)) {
maxDocFreq = parser.intValue();
} else if (Field.MIN_WORD_LENGTH.match(currentFieldName)) {
} else if (MIN_WORD_LENGTH.match(currentFieldName)) {
minWordLength = parser.intValue();
} else if (Field.MAX_WORD_LENGTH.match(currentFieldName)) {
} else if (MAX_WORD_LENGTH.match(currentFieldName)) {
maxWordLength = parser.intValue();
} else if (Field.ANALYZER.match(currentFieldName)) {
} else if (ANALYZER.match(currentFieldName)) {
analyzer = parser.text();
} else if (Field.MINIMUM_SHOULD_MATCH.match(currentFieldName)) {
} else if (MINIMUM_SHOULD_MATCH.match(currentFieldName)) {
minimumShouldMatch = parser.text();
} else if (Field.BOOST_TERMS.match(currentFieldName)) {
} else if (BOOST_TERMS.match(currentFieldName)) {
boostTerms = parser.floatValue();
} else if (Field.INCLUDE.match(currentFieldName)) {
} else if (INCLUDE.match(currentFieldName)) {
include = parser.booleanValue();
} else if (Field.FAIL_ON_UNSUPPORTED_FIELD.match(currentFieldName)) {
} else if (FAIL_ON_UNSUPPORTED_FIELD.match(currentFieldName)) {
failOnUnsupportedField = parser.booleanValue();
} else if ("boost".equals(currentFieldName)) {
boost = parser.floatValue();
@ -873,20 +868,20 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
throw new ParsingException(parser.getTokenLocation(), "[mlt] query does not support [" + currentFieldName + "]");
}
} else if (token == XContentParser.Token.START_ARRAY) {
if (Field.FIELDS.match(currentFieldName)) {
if (FIELDS.match(currentFieldName)) {
fields = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
fields.add(parser.text());
}
} else if (Field.LIKE.match(currentFieldName)) {
} else if (LIKE.match(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
parseLikeField(parser, likeTexts, likeItems);
}
} else if (Field.UNLIKE.match(currentFieldName)) {
} else if (UNLIKE.match(currentFieldName)) {
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
parseLikeField(parser, unlikeTexts, unlikeItems);
}
} else if (Field.STOP_WORDS.match(currentFieldName)) {
} else if (STOP_WORDS.match(currentFieldName)) {
stopWords = new ArrayList<>();
while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
stopWords.add(parser.text());
@ -895,9 +890,9 @@ public class MoreLikeThisQueryBuilder extends AbstractQueryBuilder<MoreLikeThisQ
throw new ParsingException(parser.getTokenLocation(), "[mlt] query does not support [" + currentFieldName + "]");
}
} else if (token == XContentParser.Token.START_OBJECT) {
if (Field.LIKE.match(currentFieldName)) {
if (LIKE.match(currentFieldName)) {
parseLikeField(parser, likeTexts, likeItems);
} else if (Field.UNLIKE.match(currentFieldName)) {
} else if (UNLIKE.match(currentFieldName)) {
parseLikeField(parser, unlikeTexts, unlikeItems);
} else {
throw new ParsingException(parser.getTokenLocation(), "[mlt] query does not support [" + currentFieldName + "]");
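The hunks above swap the nested Field.* constants and the hand-rolled "_routing"/"_version" string comparisons for statically imported ParseField constants. A minimal sketch of that pattern follows; the constant declarations, the helper class, and the org.elasticsearch.common.ParseField import are illustrative assumptions inferred from how the constants are used above, not lines from this commit.

-------------------------------------------------
import org.elasticsearch.common.ParseField;

class ParseFieldSketch {
    // Hypothetical declarations matching the usage above: the first argument is
    // the preferred name, any further arguments are accepted as alternative spellings.
    static final ParseField ROUTING = new ParseField("routing", "_routing");
    static final ParseField VERSION = new ParseField("version", "_version");

    static boolean isRouting(String currentFieldName) {
        // match() replaces the chained String.equals checks in the old parser code.
        return ROUTING.match(currentFieldName);
    }

    static String routingKey() {
        // getPreferredName() is the single spelling that toXContent() writes back out.
        return ROUTING.getPreferredName();
    }
}
-------------------------------------------------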

View File

@ -29,6 +29,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
@ -36,9 +37,11 @@ import org.elasticsearch.index.query.support.QueryParsers;
import org.elasticsearch.index.search.MatchQuery;
import org.elasticsearch.index.search.MultiMatchQuery;
import org.elasticsearch.index.search.QueryParserHelper;
import org.elasticsearch.index.search.QueryStringQueryParser;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;
@ -55,7 +58,6 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
public static final int DEFAULT_PHRASE_SLOP = MatchQuery.DEFAULT_PHRASE_SLOP;
public static final int DEFAULT_PREFIX_LENGTH = FuzzyQuery.defaultPrefixLength;
public static final int DEFAULT_MAX_EXPANSIONS = FuzzyQuery.defaultMaxExpansions;
public static final boolean DEFAULT_LENIENCY = MatchQuery.DEFAULT_LENIENCY;
public static final MatchQuery.ZeroTermsQuery DEFAULT_ZERO_TERMS_QUERY = MatchQuery.DEFAULT_ZERO_TERMS_QUERY;
public static final boolean DEFAULT_FUZZY_TRANSPOSITIONS = FuzzyQuery.defaultTranspositions;
@ -91,7 +93,7 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
private String fuzzyRewrite = null;
private Boolean useDisMax;
private Float tieBreaker;
private boolean lenient = DEFAULT_LENIENCY;
private Boolean lenient;
private Float cutoffFrequency = null;
private MatchQuery.ZeroTermsQuery zeroTermsQuery = DEFAULT_ZERO_TERMS_QUERY;
private boolean autoGenerateSynonymsPhraseQuery = true;
@ -223,7 +225,11 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
fuzzyRewrite = in.readOptionalString();
useDisMax = in.readOptionalBoolean();
tieBreaker = in.readOptionalFloat();
lenient = in.readBoolean();
if (in.getVersion().onOrAfter(Version.V_6_1_0)) {
lenient = in.readOptionalBoolean();
} else {
lenient = in.readBoolean();
}
cutoffFrequency = in.readOptionalFloat();
zeroTermsQuery = MatchQuery.ZeroTermsQuery.readFromStream(in);
if (in.getVersion().onOrAfter(Version.V_6_1_0)) {
@ -251,7 +257,11 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
out.writeOptionalString(fuzzyRewrite);
out.writeOptionalBoolean(useDisMax);
out.writeOptionalFloat(tieBreaker);
out.writeBoolean(lenient);
if (out.getVersion().onOrAfter(Version.V_6_1_0)) {
out.writeOptionalBoolean(lenient);
} else {
out.writeBoolean(lenient == null ? MatchQuery.DEFAULT_LENIENCY : lenient);
}
out.writeOptionalFloat(cutoffFrequency);
zeroTermsQuery.writeTo(out);
if (out.getVersion().onOrAfter(Version.V_6_1_0)) {
@ -488,7 +498,7 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
}
public boolean lenient() {
return lenient;
return lenient == null ? MatchQuery.DEFAULT_LENIENCY : lenient;
}
/**
@ -588,7 +598,9 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
if (tieBreaker != null) {
builder.field(TIE_BREAKER_FIELD.getPreferredName(), tieBreaker);
}
builder.field(LENIENT_FIELD.getPreferredName(), lenient);
if (lenient != null) {
builder.field(LENIENT_FIELD.getPreferredName(), lenient);
}
if (cutoffFrequency != null) {
builder.field(CUTOFF_FREQUENCY_FIELD.getPreferredName(), cutoffFrequency);
}
@ -614,7 +626,7 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
Boolean useDisMax = null;
Float tieBreaker = null;
Float cutoffFrequency = null;
boolean lenient = DEFAULT_LENIENCY;
Boolean lenient = null;
MatchQuery.ZeroTermsQuery zeroTermsQuery = DEFAULT_ZERO_TERMS_QUERY;
boolean autoGenerateSynonymsPhraseQuery = true;
boolean fuzzyTranspositions = DEFAULT_FUZZY_TRANSPOSITIONS;
@ -698,16 +710,12 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
throw new ParsingException(parser.getTokenLocation(), "No text specified for multi_match query");
}
if (fieldsBoosts.isEmpty()) {
throw new ParsingException(parser.getTokenLocation(), "No fields specified for multi_match query");
}
if (fuzziness != null && (type == Type.CROSS_FIELDS || type == Type.PHRASE || type == Type.PHRASE_PREFIX)) {
throw new ParsingException(parser.getTokenLocation(),
"Fuzziness not allowed for type [" + type.parseField.getPreferredName() + "]");
}
return new MultiMatchQueryBuilder(value)
MultiMatchQueryBuilder builder = new MultiMatchQueryBuilder(value)
.fields(fieldsBoosts)
.type(type)
.analyzer(analyzer)
@ -715,7 +723,6 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
.fuzziness(fuzziness)
.fuzzyRewrite(fuzzyRewrite)
.useDisMax(useDisMax)
.lenient(lenient)
.maxExpansions(maxExpansions)
.minimumShouldMatch(minimumShouldMatch)
.operator(operator)
@ -727,6 +734,10 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
.boost(boost)
.queryName(queryName)
.fuzzyTranspositions(fuzzyTranspositions);
if (lenient != null) {
builder.lenient(lenient);
}
return builder;
}
private static void parseFieldAndBoost(XContentParser parser, Map<String, Float> fieldsBoosts) throws IOException {
@ -778,7 +789,9 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
if (cutoffFrequency != null) {
multiMatchQuery.setCommonTermsCutoff(cutoffFrequency);
}
multiMatchQuery.setLenient(lenient);
if (lenient != null) {
multiMatchQuery.setLenient(lenient);
}
multiMatchQuery.setZeroTermsQuery(zeroTermsQuery);
multiMatchQuery.setAutoGenerateSynonymsPhraseQuery(autoGenerateSynonymsPhraseQuery);
multiMatchQuery.setTranspositions(fuzzyTranspositions);
@ -793,8 +806,20 @@ public class MultiMatchQueryBuilder extends AbstractQueryBuilder<MultiMatchQuery
}
}
}
Map<String, Float> newFieldsBoosts = QueryParserHelper.resolveMappingFields(context, fieldsBoosts);
Map<String, Float> newFieldsBoosts;
if (fieldsBoosts.isEmpty()) {
// no fields provided, defaults to index.query.default_field
List<String> defaultFields = context.defaultFields();
boolean isAllField = defaultFields.size() == 1 && Regex.isMatchAllPattern(defaultFields.get(0));
if (isAllField && lenient == null) {
// Sets leniency to true if not explicitly
// set in the request
multiMatchQuery.setLenient(true);
}
newFieldsBoosts = QueryParserHelper.resolveMappingFields(context, QueryParserHelper.parseFieldsAndWeights(defaultFields));
} else {
newFieldsBoosts = QueryParserHelper.resolveMappingFields(context, fieldsBoosts);
}
return multiMatchQuery.parse(type, newFieldsBoosts, value, minimumShouldMatch);
}
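Taken together, these hunks make lenient a tri-state Boolean (serialized as an optional boolean from 6.1.0 onwards) and let a multi_match query without an explicit field list fall back to index.query.default_field at doToQuery time, enabling leniency only when that default is the match-all pattern and the caller never set it. A short usage sketch, assuming only the builder constructor and setter visible above; the helper class and query text are illustrative.

-------------------------------------------------
import org.elasticsearch.index.query.MultiMatchQueryBuilder;

class MultiMatchLenientSketch {
    MultiMatchQueryBuilder explicitLenient() {
        // Illustrative query text. An explicit lenient(true) is preserved: it is
        // written to XContent and, on 6.1.0+ wires, serialized as an optional boolean.
        return new MultiMatchQueryBuilder("quick brown fox").lenient(true);
    }

    MultiMatchQueryBuilder defaultFields() {
        // No fields and no lenient(...) call: field names are resolved from
        // index.query.default_field when the query is built, and leniency is
        // switched on only if that default is the match-all pattern.
        return new MultiMatchQueryBuilder("quick brown fox");
    }
}
-------------------------------------------------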

View File

@ -28,9 +28,8 @@ import java.util.List;
import java.util.Map;
import java.util.function.Function;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
@ -38,10 +37,8 @@ import org.elasticsearch.env.Environment;
public class InternalSettingsPreparer {
private static final String[] ALLOWED_SUFFIXES = {".yml", ".yaml", ".json"};
public static final String SECRET_PROMPT_VALUE = "${prompt.secret}";
public static final String TEXT_PROMPT_VALUE = "${prompt.text}";
private static final String SECRET_PROMPT_VALUE = "${prompt.secret}";
private static final String TEXT_PROMPT_VALUE = "${prompt.text}";
/**
* Prepares the settings by gathering all elasticsearch system properties and setting defaults.
@ -49,36 +46,29 @@ public class InternalSettingsPreparer {
public static Settings prepareSettings(Settings input) {
Settings.Builder output = Settings.builder();
initializeSettings(output, input, Collections.emptyMap());
finalizeSettings(output, null);
finalizeSettings(output);
return output.build();
}
/**
* Prepares the settings by gathering all elasticsearch system properties, optionally loading the configuration settings,
* and then replacing all property placeholders. If a {@link Terminal} is provided and configuration settings are loaded,
* settings with a value of <code>${prompt.text}</code> or <code>${prompt.secret}</code> will result in a prompt for
* the setting to the user.
* Prepares the settings by gathering all elasticsearch system properties, optionally loading the configuration settings.
*
* @param input The custom settings to use. These are not overwritten by settings in the configuration file.
* @param terminal the Terminal to use for input/output
* @return the {@link Settings} and {@link Environment} as a {@link Tuple}
*/
public static Environment prepareEnvironment(Settings input, Terminal terminal) {
return prepareEnvironment(input, terminal, Collections.emptyMap(), null);
public static Environment prepareEnvironment(Settings input) {
return prepareEnvironment(input, Collections.emptyMap(), null);
}
/**
* Prepares the settings by gathering all elasticsearch system properties, optionally loading the configuration settings,
* and then replacing all property placeholders. If a {@link Terminal} is provided and configuration settings are loaded,
* settings with a value of <code>${prompt.text}</code> or <code>${prompt.secret}</code> will result in a prompt for
* the setting to the user.
* Prepares the settings by gathering all elasticsearch system properties, optionally loading the configuration settings.
*
* @param input the custom settings to use; these are not overwritten by settings in the configuration file
* @param terminal the Terminal to use for input/output
* @param properties map of properties key/value pairs (usually from the command-line)
* @param configPath path to config directory; (use null to indicate the default)
* @return the {@link Settings} and {@link Environment} as a {@link Tuple}
*/
public static Environment prepareEnvironment(Settings input, Terminal terminal, Map<String, String> properties, Path configPath) {
public static Environment prepareEnvironment(Settings input, Map<String, String> properties, Path configPath) {
// just create enough settings to build the environment, to get the config dir
Settings.Builder output = Settings.builder();
initializeSettings(output, input, properties);
@ -104,7 +94,8 @@ public class InternalSettingsPreparer {
// re-initialize settings now that the config file has been loaded
initializeSettings(output, input, properties);
finalizeSettings(output, terminal);
checkSettingsForTerminalDeprecation(output);
finalizeSettings(output);
environment = new Environment(output.build(), configPath);
@ -128,10 +119,28 @@ public class InternalSettingsPreparer {
}
/**
* Finish preparing settings by replacing forced settings, prompts, and any defaults that need to be added.
* The provided terminal is used to prompt for settings needing to be replaced.
* Checks all settings values to make sure they do not have the old prompt settings. These were deprecated in 6.0.0.
* This check should be removed in 8.0.0.
*/
private static void finalizeSettings(Settings.Builder output, Terminal terminal) {
private static void checkSettingsForTerminalDeprecation(final Settings.Builder output) throws SettingsException {
// This method to be removed in 8.0.0, as it was deprecated in 6.0 and removed in 7.0
assert Version.CURRENT.major != 8: "Logic pertaining to config driven prompting should be removed";
for (String setting : output.keys()) {
switch (output.get(setting)) {
case SECRET_PROMPT_VALUE:
throw new SettingsException("Config driven secret prompting was deprecated in 6.0.0. Use the keystore" +
" for secure settings.");
case TEXT_PROMPT_VALUE:
throw new SettingsException("Config driven text prompting was deprecated in 6.0.0. Use the keystore" +
" for secure settings.");
}
}
}
/**
* Finish preparing settings by replacing forced settings and any defaults that need to be added.
*/
private static void finalizeSettings(Settings.Builder output) {
// allow to force set properties based on configuration of the settings provided
List<String> forcedSettings = new ArrayList<>();
for (String setting : output.keys()) {
@ -149,53 +158,5 @@ public class InternalSettingsPreparer {
if (output.get(ClusterName.CLUSTER_NAME_SETTING.getKey()) == null) {
output.put(ClusterName.CLUSTER_NAME_SETTING.getKey(), ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY).value());
}
replacePromptPlaceholders(output, terminal);
}
private static void replacePromptPlaceholders(Settings.Builder settings, Terminal terminal) {
List<String> secretToPrompt = new ArrayList<>();
List<String> textToPrompt = new ArrayList<>();
for (String key : settings.keys()) {
switch (settings.get(key)) {
case SECRET_PROMPT_VALUE:
secretToPrompt.add(key);
break;
case TEXT_PROMPT_VALUE:
textToPrompt.add(key);
break;
}
}
for (String setting : secretToPrompt) {
String secretValue = promptForValue(setting, terminal, true);
if (Strings.hasLength(secretValue)) {
settings.put(setting, secretValue);
} else {
// TODO: why do we remove settings if prompt returns empty??
settings.remove(setting);
}
}
for (String setting : textToPrompt) {
String textValue = promptForValue(setting, terminal, false);
if (Strings.hasLength(textValue)) {
settings.put(setting, textValue);
} else {
// TODO: why do we remove settings if prompt returns empty??
settings.remove(setting);
}
}
}
private static String promptForValue(String key, Terminal terminal, boolean secret) {
if (terminal == null) {
throw new UnsupportedOperationException("found property [" + key + "] with value ["
+ (secret ? SECRET_PROMPT_VALUE : TEXT_PROMPT_VALUE)
+ "]. prompting for property values is only supported when running elasticsearch in the foreground");
}
if (secret) {
return new String(terminal.readSecret("Enter value for [" + key + "]: "));
}
return terminal.readText("Enter value for [" + key + "]: ");
}
}
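With the Terminal parameter removed, callers prepare an environment straight from settings, and any leftover ${prompt.secret} or ${prompt.text} value fails fast with a SettingsException that points at the keystore. A minimal sketch of the new call path, assuming only the signatures shown above; the helper class, the package locations in the imports, and the cluster.name value are illustrative.

-------------------------------------------------
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.node.InternalSettingsPreparer;

class PrepareEnvironmentSketch {
    Environment prepare() {
        Settings input = Settings.builder()
                .put("cluster.name", "my-cluster") // illustrative setting
                .build();
        // Replaces prepareEnvironment(input, null). If the loaded configuration
        // still contains ${prompt.secret} or ${prompt.text}, a SettingsException
        // is thrown instead of prompting on the terminal.
        return InternalSettingsPreparer.prepareEnvironment(input);
    }
}
-------------------------------------------------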

View File

@ -238,7 +238,7 @@ public class Node implements Closeable {
* @param preparedSettings Base settings to configure the node with
*/
public Node(Settings preparedSettings) {
this(InternalSettingsPreparer.prepareEnvironment(preparedSettings, null));
this(InternalSettingsPreparer.prepareEnvironment(preparedSettings));
}
public Node(Environment environment) {

View File

@ -23,14 +23,23 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateApplier;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateTaskConfig;
import org.elasticsearch.cluster.ClusterStateTaskExecutor;
import org.elasticsearch.cluster.ClusterStateTaskListener;
import org.elasticsearch.cluster.SnapshotsInProgress;
import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus;
import org.elasticsearch.cluster.SnapshotsInProgress.State;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
@ -58,10 +67,6 @@ import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.EmptyTransportResponseHandler;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
@ -85,9 +90,9 @@ import static org.elasticsearch.cluster.SnapshotsInProgress.completed;
* This service runs on data and master nodes and controls currently snapshotted shards on these nodes. It is responsible for
* starting and stopping shard level snapshots
*/
public class SnapshotShardsService extends AbstractLifecycleComponent implements ClusterStateApplier, IndexEventListener {
public class SnapshotShardsService extends AbstractLifecycleComponent implements ClusterStateListener, IndexEventListener {
public static final String UPDATE_SNAPSHOT_ACTION_NAME = "internal:cluster/snapshot/update_snapshot";
public static final String UPDATE_SNAPSHOT_STATUS_ACTION_NAME = "internal:cluster/snapshot/update_snapshot_status";
private final ClusterService clusterService;
@ -106,10 +111,12 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
private volatile Map<Snapshot, SnapshotShards> shardSnapshots = emptyMap();
private final SnapshotStateExecutor snapshotStateExecutor = new SnapshotStateExecutor();
private UpdateSnapshotStatusAction updateSnapshotStatusHandler;
@Inject
public SnapshotShardsService(Settings settings, ClusterService clusterService, SnapshotsService snapshotsService, ThreadPool threadPool,
TransportService transportService, IndicesService indicesService) {
TransportService transportService, IndicesService indicesService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings);
this.indicesService = indicesService;
this.snapshotsService = snapshotsService;
@ -118,20 +125,18 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
this.threadPool = threadPool;
if (DiscoveryNode.isDataNode(settings)) {
// this is only useful on the nodes that can hold data
// addLowPriorityApplier to make sure that Repository will be created before snapshot
clusterService.addLowPriorityApplier(this);
}
if (DiscoveryNode.isMasterNode(settings)) {
// This needs to run only on nodes that can become masters
transportService.registerRequestHandler(UPDATE_SNAPSHOT_ACTION_NAME, UpdateIndexShardSnapshotStatusRequest::new, ThreadPool.Names.SAME, new UpdateSnapshotStateRequestHandler());
clusterService.addListener(this);
}
// The constructor of UpdateSnapshotStatusAction registers the action with the TransportService.
this.updateSnapshotStatusHandler = new UpdateSnapshotStatusAction(settings, UPDATE_SNAPSHOT_STATUS_ACTION_NAME,
transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver);
}
@Override
protected void doStart() {
assert this.updateSnapshotStatusHandler != null;
assert transportService.getRequestHandler(UPDATE_SNAPSHOT_STATUS_ACTION_NAME) != null;
}
@Override
@ -151,11 +156,11 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
@Override
protected void doClose() {
clusterService.removeApplier(this);
clusterService.removeListener(this);
}
@Override
public void applyClusterState(ClusterChangedEvent event) {
public void clusterChanged(ClusterChangedEvent event) {
try {
SnapshotsInProgress prev = event.previousState().custom(SnapshotsInProgress.TYPE);
SnapshotsInProgress curr = event.state().custom(SnapshotsInProgress.TYPE);
@ -449,7 +454,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
/**
* Internal request that is used to send changes in snapshot status to master
*/
public static class UpdateIndexShardSnapshotStatusRequest extends TransportRequest {
public static class UpdateIndexShardSnapshotStatusRequest extends MasterNodeRequest<UpdateIndexShardSnapshotStatusRequest> {
private Snapshot snapshot;
private ShardId shardId;
private ShardSnapshotStatus status;
@ -462,6 +467,13 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
this.snapshot = snapshot;
this.shardId = shardId;
this.status = status;
// By default, we keep trying to post snapshot status messages to avoid snapshot processes getting stuck.
this.masterNodeTimeout = TimeValue.timeValueNanos(Long.MAX_VALUE);
}
@Override
public ActionRequestValidationException validate() {
return null;
}
@Override
@ -502,11 +514,11 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
* Updates the shard status
*/
public void updateIndexShardSnapshotStatus(Snapshot snapshot, ShardId shardId, ShardSnapshotStatus status, DiscoveryNode master) {
UpdateIndexShardSnapshotStatusRequest request = new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status);
try {
transportService.sendRequest(master, UPDATE_SNAPSHOT_ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
UpdateIndexShardSnapshotStatusRequest request = new UpdateIndexShardSnapshotStatusRequest(snapshot, shardId, status);
transportService.sendRequest(transportService.getLocalNode(), UPDATE_SNAPSHOT_STATUS_ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
} catch (Exception e) {
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", request.snapshot(), request.status()), e);
logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] [{}] failed to update snapshot state", snapshot, status), e);
}
}
@ -515,15 +527,24 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
*
* @param request update shard status request
*/
private void innerUpdateSnapshotState(final UpdateIndexShardSnapshotStatusRequest request) {
private void innerUpdateSnapshotState(final UpdateIndexShardSnapshotStatusRequest request, ActionListener<UpdateIndexShardSnapshotStatusResponse> listener) {
logger.trace("received updated snapshot restore state [{}]", request);
clusterService.submitStateUpdateTask(
"update snapshot state",
request,
ClusterStateTaskConfig.build(Priority.NORMAL),
snapshotStateExecutor,
(source, e) -> logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}][{}] failed to update snapshot status to [{}]",
request.snapshot(), request.shardId(), request.status()), e));
new ClusterStateTaskListener() {
@Override
public void onFailure(String source, Exception e) {
listener.onFailure(e);
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
listener.onResponse(new UpdateIndexShardSnapshotStatusResponse());
}
});
}
class SnapshotStateExecutor implements ClusterStateTaskExecutor<UpdateIndexShardSnapshotStatusRequest> {
@ -578,14 +599,34 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements
}
}
/**
* Transport request handler that is used to send changes in snapshot status to master
*/
class UpdateSnapshotStateRequestHandler implements TransportRequestHandler<UpdateIndexShardSnapshotStatusRequest> {
static class UpdateIndexShardSnapshotStatusResponse extends ActionResponse {
}
class UpdateSnapshotStatusAction extends TransportMasterNodeAction<UpdateIndexShardSnapshotStatusRequest, UpdateIndexShardSnapshotStatusResponse> {
UpdateSnapshotStatusAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, actionName, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, UpdateIndexShardSnapshotStatusRequest::new);
}
@Override
public void messageReceived(UpdateIndexShardSnapshotStatusRequest request, final TransportChannel channel) throws Exception {
innerUpdateSnapshotState(request);
channel.sendResponse(TransportResponse.Empty.INSTANCE);
protected String executor() {
return ThreadPool.Names.SAME;
}
@Override
protected UpdateIndexShardSnapshotStatusResponse newResponse() {
return new UpdateIndexShardSnapshotStatusResponse();
}
@Override
protected void masterOperation(UpdateIndexShardSnapshotStatusRequest request, ClusterState state, ActionListener<UpdateIndexShardSnapshotStatusResponse> listener) throws Exception {
innerUpdateSnapshotState(request, listener);
}
@Override
protected ClusterBlockException checkBlock(UpdateIndexShardSnapshotStatusRequest request, ClusterState state) {
return null;
}
}

View File

@ -19,19 +19,18 @@
package org.elasticsearch.transport;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.unit.TimeValue;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ExecutionException;
@ -61,7 +60,7 @@ public interface TcpChannel extends Releasable {
*
* @param listener to be executed
*/
void addCloseListener(ActionListener<TcpChannel> listener);
void addCloseListener(ActionListener<Void> listener);
/**
@ -80,6 +79,22 @@ public interface TcpChannel extends Releasable {
*/
boolean isOpen();
/**
* Returns the local address for this channel.
*
* @return the local address of this channel.
*/
InetSocketAddress getLocalAddress();
/**
* Sends a tcp message to the channel. The listener will be executed once the send process has been
* completed.
*
* @param reference to send to channel
* @param listener to execute upon send completion
*/
void sendMessage(BytesReference reference, ActionListener<Void> listener);
/**
* Closes the channel.
*
@ -98,10 +113,10 @@ public interface TcpChannel extends Releasable {
*/
static <C extends TcpChannel> void closeChannels(List<C> channels, boolean blocking) {
if (blocking) {
ArrayList<ActionFuture<TcpChannel>> futures = new ArrayList<>(channels.size());
ArrayList<ActionFuture<Void>> futures = new ArrayList<>(channels.size());
for (final C channel : channels) {
if (channel.isOpen()) {
PlainActionFuture<TcpChannel> closeFuture = PlainActionFuture.newFuture();
PlainActionFuture<Void> closeFuture = PlainActionFuture.newFuture();
channel.addCloseListener(closeFuture);
channel.close();
futures.add(closeFuture);
@ -120,15 +135,14 @@ public interface TcpChannel extends Releasable {
* @param discoveryNode the node for the pending connections
* @param connectionFutures representing the pending connections
* @param connectTimeout to wait for a connection
* @param <C> the type of channel
* @throws ConnectTransportException if one of the connections fails
*/
static <C extends TcpChannel> void awaitConnected(DiscoveryNode discoveryNode, List<ActionFuture<C>> connectionFutures,
TimeValue connectTimeout) throws ConnectTransportException {
static void awaitConnected(DiscoveryNode discoveryNode, List<ActionFuture<Void>> connectionFutures, TimeValue connectTimeout)
throws ConnectTransportException {
Exception connectionException = null;
boolean allConnected = true;
for (ActionFuture<C> connectionFuture : connectionFutures) {
for (ActionFuture<Void> connectionFuture : connectionFutures) {
try {
connectionFuture.get(connectTimeout.getMillis(), TimeUnit.MILLISECONDS);
} catch (TimeoutException e) {
@ -153,8 +167,8 @@ public interface TcpChannel extends Releasable {
}
}
static void blockOnFutures(List<ActionFuture<TcpChannel>> futures) {
for (ActionFuture<TcpChannel> future : futures) {
static void blockOnFutures(List<ActionFuture<Void>> futures) {
for (ActionFuture<Void> future : futures) {
try {
future.get();
} catch (ExecutionException e) {
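Because close and connect listeners are now typed as ActionListener&lt;Void&gt;, generic helpers such as PlainActionFuture can be registered directly for blocking waits, which is what closeChannels and awaitConnected above rely on. A minimal sketch using only the TcpChannel methods visible in this hunk; the helper class is illustrative.

-------------------------------------------------
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.transport.TcpChannel;

class BlockingCloseSketch { // illustrative helper
    static void closeAndWait(TcpChannel channel) {
        // PlainActionFuture implements ActionListener<Void> here, so it can be
        // registered as the close listener and then waited on.
        PlainActionFuture<Void> closed = PlainActionFuture.newFuture();
        channel.addCloseListener(closed);
        channel.close();
        closed.actionGet(); // rethrows a close failure as an unchecked exception
    }
}
-------------------------------------------------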

View File

@ -118,7 +118,7 @@ import static org.elasticsearch.common.transport.NetworkExceptionHelper.isCloseC
import static org.elasticsearch.common.transport.NetworkExceptionHelper.isConnectException;
import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;
public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractLifecycleComponent implements Transport {
public abstract class TcpTransport extends AbstractLifecycleComponent implements Transport {
public static final String TRANSPORT_SERVER_WORKER_THREAD_NAME_PREFIX = "transport_server_worker";
public static final String TRANSPORT_CLIENT_BOSS_THREAD_NAME_PREFIX = "transport_client_boss";
@ -199,8 +199,8 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
protected final ConcurrentMap<DiscoveryNode, NodeChannels> connectedNodes = newConcurrentMap();
protected final ConcurrentMap<String, BoundTransportAddress> profileBoundAddresses = newConcurrentMap();
private final Map<String, List<Channel>> serverChannels = newConcurrentMap();
private final Set<Channel> acceptedChannels = Collections.newSetFromMap(new ConcurrentHashMap<>());
private final Map<String, List<TcpChannel>> serverChannels = newConcurrentMap();
private final Set<TcpChannel> acceptedChannels = Collections.newSetFromMap(new ConcurrentHashMap<>());
protected final KeyedLock<String> connectionLock = new KeyedLock<>();
private final NamedWriteableRegistry namedWriteableRegistry;
@ -278,13 +278,13 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
this.transportService = service;
}
private static class HandshakeResponseHandler<Channel> implements TransportResponseHandler<VersionHandshakeResponse> {
private static class HandshakeResponseHandler implements TransportResponseHandler<VersionHandshakeResponse> {
final AtomicReference<Version> versionRef = new AtomicReference<>();
final CountDownLatch latch = new CountDownLatch(1);
final AtomicReference<Exception> exceptionRef = new AtomicReference<>();
final Channel channel;
final TcpChannel channel;
HandshakeResponseHandler(Channel channel) {
HandshakeResponseHandler(TcpChannel channel) {
this.channel = channel;
}
@ -340,10 +340,10 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
for (Map.Entry<DiscoveryNode, NodeChannels> entry : connectedNodes.entrySet()) {
DiscoveryNode node = entry.getKey();
NodeChannels channels = entry.getValue();
for (Channel channel : channels.getChannels()) {
internalSendMessage(channel, pingHeader, new SendMetricListener<Channel>(pingHeader.length()) {
for (TcpChannel channel : channels.getChannels()) {
internalSendMessage(channel, pingHeader, new SendMetricListener(pingHeader.length()) {
@Override
protected void innerInnerOnResponse(Channel channel) {
protected void innerInnerOnResponse(Void v) {
successfulPings.inc();
}
@ -397,12 +397,12 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
public final class NodeChannels implements Connection {
private final Map<TransportRequestOptions.Type, ConnectionProfile.ConnectionTypeHandle> typeMapping;
private final List<Channel> channels;
private final List<TcpChannel> channels;
private final DiscoveryNode node;
private final AtomicBoolean closed = new AtomicBoolean(false);
private final Version version;
NodeChannels(DiscoveryNode node, List<Channel> channels, ConnectionProfile connectionProfile, Version handshakeVersion) {
NodeChannels(DiscoveryNode node, List<TcpChannel> channels, ConnectionProfile connectionProfile, Version handshakeVersion) {
this.node = node;
this.channels = Collections.unmodifiableList(channels);
assert channels.size() == connectionProfile.getNumConnections() : "expected channels size to be == "
@ -420,11 +420,11 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
return version;
}
public List<Channel> getChannels() {
public List<TcpChannel> getChannels() {
return channels;
}
public Channel channel(TransportRequestOptions.Type type) {
public TcpChannel channel(TransportRequestOptions.Type type) {
ConnectionProfile.ConnectionTypeHandle connectionTypeHandle = typeMapping.get(type);
if (connectionTypeHandle == null) {
throw new IllegalArgumentException("no type channel for [" + type + "]");
@ -477,7 +477,7 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
if (closed.get()) {
throw new NodeNotConnectedException(node, "connection already closed");
}
Channel channel = channel(options.type());
TcpChannel channel = channel(options.type());
sendRequestToChannel(this.node, channel, requestId, action, request, options, getVersion(), (byte) 0);
}
@ -594,13 +594,13 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
try {
int numConnections = connectionProfile.getNumConnections();
assert numConnections > 0 : "A connection profile must be configured with at least one connection";
List<Channel> channels = new ArrayList<>(numConnections);
List<ActionFuture<Channel>> connectionFutures = new ArrayList<>(numConnections);
List<TcpChannel> channels = new ArrayList<>(numConnections);
List<ActionFuture<Void>> connectionFutures = new ArrayList<>(numConnections);
for (int i = 0; i < numConnections; ++i) {
try {
PlainActionFuture<Channel> connectFuture = PlainActionFuture.newFuture();
PlainActionFuture<Void> connectFuture = PlainActionFuture.newFuture();
connectionFutures.add(connectFuture);
Channel channel = initiateChannel(node, connectionProfile.getConnectTimeout(), connectFuture);
TcpChannel channel = initiateChannel(node, connectionProfile.getConnectTimeout(), connectFuture);
channels.add(channel);
} catch (Exception e) {
// If there was an exception when attempting to instantiate the raw channels, we close all of the channels
@ -618,7 +618,7 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
}
// If we make it past the block above, we have successfully established connections for all of the channels
final Channel handshakeChannel = channels.get(0); // one channel is guaranteed by the connection profile
final TcpChannel handshakeChannel = channels.get(0); // one channel is guaranteed by the connection profile
handshakeChannel.addCloseListener(ActionListener.wrap(() -> cancelHandshakeForChannel(handshakeChannel)));
Version version;
try {
@ -635,7 +635,7 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
transportService.onConnectionOpened(nodeChannels);
final NodeChannels finalNodeChannels = nodeChannels;
final AtomicBoolean runOnce = new AtomicBoolean(false);
Consumer<Channel> onClose = c -> {
Consumer<TcpChannel> onClose = c -> {
assert c.isOpen() == false : "channel is still open when onClose is called";
// we only need to disconnect from the nodes once since all other channels
// will also try to run this we protect it from running multiple times.
@ -772,15 +772,15 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
final AtomicReference<InetSocketAddress> boundSocket = new AtomicReference<>();
boolean success = portsRange.iterate(portNumber -> {
try {
Channel channel = bind(name, new InetSocketAddress(hostAddress, portNumber));
TcpChannel channel = bind(name, new InetSocketAddress(hostAddress, portNumber));
synchronized (serverChannels) {
List<Channel> list = serverChannels.get(name);
List<TcpChannel> list = serverChannels.get(name);
if (list == null) {
list = new ArrayList<>();
serverChannels.put(name, list);
}
list.add(channel);
boundSocket.set(getLocalAddress(channel));
boundSocket.set(channel.getLocalAddress());
}
} catch (Exception e) {
lastException.set(e);
@ -937,10 +937,10 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
closeLock.writeLock().lock();
try {
// first stop to accept any incoming connections so nobody can connect to this transport
for (Map.Entry<String, List<Channel>> entry : serverChannels.entrySet()) {
for (Map.Entry<String, List<TcpChannel>> entry : serverChannels.entrySet()) {
String profile = entry.getKey();
List<Channel> channels = entry.getValue();
ActionListener<TcpChannel> closeFailLogger = ActionListener.wrap(c -> {},
List<TcpChannel> channels = entry.getValue();
ActionListener<Void> closeFailLogger = ActionListener.wrap(c -> {},
e -> logger.warn(() -> new ParameterizedMessage("Error closing serverChannel for profile [{}]", profile), e));
channels.forEach(c -> c.addCloseListener(closeFailLogger));
TcpChannel.closeChannels(channels, true);
@ -979,7 +979,7 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
}
}
protected void onException(Channel channel, Exception e) {
protected void onException(TcpChannel channel, Exception e) {
if (!lifecycle.started()) {
// just close and ignore - we are already stopped and just need to make sure we release all resources
TcpChannel.closeChannel(channel, false);
@ -1014,9 +1014,9 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
// in case we are able to return data, serialize the exception content and sent it back to the client
if (channel.isOpen()) {
BytesArray message = new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8));
final SendMetricListener<Channel> closeChannel = new SendMetricListener<Channel>(message.length()) {
final SendMetricListener closeChannel = new SendMetricListener(message.length()) {
@Override
protected void innerInnerOnResponse(Channel channel) {
protected void innerInnerOnResponse(Void v) {
TcpChannel.closeChannel(channel, false);
}
@ -1036,34 +1036,19 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
}
}
protected void serverAcceptedChannel(Channel channel) {
protected void serverAcceptedChannel(TcpChannel channel) {
boolean addedOnThisCall = acceptedChannels.add(channel);
assert addedOnThisCall : "Channel should only be added to accept channel set once";
channel.addCloseListener(ActionListener.wrap(() -> acceptedChannels.remove(channel)));
}
/**
* Returns the channels local address
*/
protected abstract InetSocketAddress getLocalAddress(Channel channel);
/**
* Binds to the given {@link InetSocketAddress}
*
* @param name the profile name
* @param address the address to bind to
*/
protected abstract Channel bind(String name, InetSocketAddress address) throws IOException;
/**
* Sends message to channel. The listener's onResponse method will be called when the send is complete unless an exception
* is thrown during the send. If an exception is thrown, the listener's onException method will be called.
*
* @param channel the destination channel
* @param reference the byte reference for the message
* @param listener the listener to call when the operation has completed
*/
protected abstract void sendMessage(Channel channel, BytesReference reference, ActionListener<Channel> listener);
protected abstract TcpChannel bind(String name, InetSocketAddress address) throws IOException;
/**
* Initiate a single tcp socket channel to a node. Implementations do not have to observe the connectTimeout.
@ -1075,7 +1060,7 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
* @return the pending connection
* @throws IOException if an I/O exception occurs while opening the channel
*/
protected abstract Channel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener<Channel> connectListener)
protected abstract TcpChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener<Void> connectListener)
throws IOException;
/**
@ -1088,7 +1073,7 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
return compress && (!(request instanceof BytesTransportRequest));
}
private void sendRequestToChannel(final DiscoveryNode node, final Channel targetChannel, final long requestId, final String action,
private void sendRequestToChannel(final DiscoveryNode node, final TcpChannel channel, final long requestId, final String action,
final TransportRequest request, TransportRequestOptions options, Version channelVersion,
byte status) throws IOException,
TransportException {
@ -1120,9 +1105,9 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
BytesReference message = buildMessage(requestId, status, node.getVersion(), request, stream);
final TransportRequestOptions finalOptions = options;
// this might be called in a different thread
SendListener onRequestSent = new SendListener(stream,
SendListener onRequestSent = new SendListener(channel, stream,
() -> transportService.onRequestSent(node, requestId, action, request, finalOptions), message.length());
internalSendMessage(targetChannel, message, onRequestSent);
internalSendMessage(channel, message, onRequestSent);
addedReleaseListener = true;
} finally {
if (!addedReleaseListener) {
@ -1134,13 +1119,13 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
/**
* sends a message to the given channel, using the given callbacks.
*/
private void internalSendMessage(Channel targetChannel, BytesReference message, SendMetricListener<Channel> listener) {
private void internalSendMessage(TcpChannel channel, BytesReference message, SendMetricListener listener) {
try {
sendMessage(targetChannel, message, listener);
channel.sendMessage(message, listener);
} catch (Exception ex) {
// call listener to ensure that any resources are released
listener.onFailure(ex);
onException(targetChannel, ex);
onException(channel, ex);
}
}
@ -1153,12 +1138,12 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
* @param requestId the request ID this response replies to
* @param action the action this response replies to
*/
public void sendErrorResponse(Version nodeVersion, Channel channel, final Exception error, final long requestId,
public void sendErrorResponse(Version nodeVersion, TcpChannel channel, final Exception error, final long requestId,
final String action) throws IOException {
try (BytesStreamOutput stream = new BytesStreamOutput()) {
stream.setVersion(nodeVersion);
RemoteTransportException tx = new RemoteTransportException(
nodeName(), new TransportAddress(getLocalAddress(channel)), action, error);
nodeName(), new TransportAddress(channel.getLocalAddress()), action, error);
threadPool.getThreadContext().writeTo(stream);
stream.writeException(tx);
byte status = 0;
@ -1167,7 +1152,7 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
final BytesReference bytes = stream.bytes();
final BytesReference header = buildHeader(requestId, status, nodeVersion, bytes.length());
CompositeBytesReference message = new CompositeBytesReference(header, bytes);
SendListener onResponseSent = new SendListener(null,
SendListener onResponseSent = new SendListener(channel, null,
() -> transportService.onResponseSent(requestId, action, error), message.length());
internalSendMessage(channel, message, onResponseSent);
}
@ -1178,12 +1163,12 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
*
* @see #sendErrorResponse(Version, TcpChannel, Exception, long, String) for sending back errors to the caller
*/
public void sendResponse(Version nodeVersion, Channel channel, final TransportResponse response, final long requestId,
public void sendResponse(Version nodeVersion, TcpChannel channel, final TransportResponse response, final long requestId,
final String action, TransportResponseOptions options) throws IOException {
sendResponse(nodeVersion, channel, response, requestId, action, options, (byte) 0);
}
private void sendResponse(Version nodeVersion, Channel channel, final TransportResponse response, final long requestId,
private void sendResponse(Version nodeVersion, TcpChannel channel, final TransportResponse response, final long requestId,
final String action, TransportResponseOptions options, byte status) throws IOException {
if (compress) {
options = TransportResponseOptions.builder(options).withCompress(true).build();
@ -1202,7 +1187,7 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
final TransportResponseOptions finalOptions = options;
// this might be called in a different thread
SendListener listener = new SendListener(stream,
SendListener listener = new SendListener(channel, stream,
() -> transportService.onResponseSent(requestId, action, response, finalOptions), message.length());
internalSendMessage(channel, message, listener);
addedReleaseListener = true;
@ -1355,7 +1340,7 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
/**
* This method handles the message receive part for both request and responses
*/
public final void messageReceived(BytesReference reference, Channel channel, String profileName,
public final void messageReceived(BytesReference reference, TcpChannel channel, String profileName,
InetSocketAddress remoteAddress, int messageLengthBytes) throws IOException {
final int totalMessageSize = messageLengthBytes + TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE;
readBytesMetric.inc(totalMessageSize);
@ -1494,8 +1479,9 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
});
}
protected String handleRequest(Channel channel, String profileName, final StreamInput stream, long requestId, int messageLengthBytes,
Version version, InetSocketAddress remoteAddress, byte status) throws IOException {
protected String handleRequest(TcpChannel channel, String profileName, final StreamInput stream, long requestId,
int messageLengthBytes, Version version, InetSocketAddress remoteAddress, byte status)
throws IOException {
final String action = stream.readString();
transportService.onRequestReceived(requestId, action);
TransportChannel transportChannel = null;
@ -1514,7 +1500,7 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
} else {
getInFlightRequestBreaker().addWithoutBreaking(messageLengthBytes);
}
transportChannel = new TcpTransportChannel<>(this, channel, transportName, action, requestId, version, profileName,
transportChannel = new TcpTransportChannel(this, channel, transportName, action, requestId, version, profileName,
messageLengthBytes);
final TransportRequest request = reg.newRequest(stream);
request.remoteAddress(new TransportAddress(remoteAddress));
@ -1525,7 +1511,7 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
} catch (Exception e) {
// the circuit breaker tripped
if (transportChannel == null) {
transportChannel = new TcpTransportChannel<>(this, channel, transportName, action, requestId, version, profileName, 0);
transportChannel = new TcpTransportChannel(this, channel, transportName, action, requestId, version, profileName, 0);
}
try {
transportChannel.sendResponse(e);
@ -1611,7 +1597,8 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
}
}
protected Version executeHandshake(DiscoveryNode node, Channel channel, TimeValue timeout) throws IOException, InterruptedException {
protected Version executeHandshake(DiscoveryNode node, TcpChannel channel, TimeValue timeout)
throws IOException, InterruptedException {
numHandshakes.inc();
final long requestId = newRequestId();
final HandshakeResponseHandler handler = new HandshakeResponseHandler(channel);
@ -1671,7 +1658,7 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
/**
* Called once the channel is closed for instance due to a disconnect or a closed socket etc.
*/
private void cancelHandshakeForChannel(Channel channel) {
private void cancelHandshakeForChannel(TcpChannel channel) {
final Optional<Long> first = pendingHandshakes.entrySet().stream()
.filter((entry) -> entry.getValue().channel == channel).map(Map.Entry::getKey).findFirst();
if (first.isPresent()) {
@ -1699,7 +1686,7 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
/**
* This listener increments the transmitted bytes metric on success.
*/
private abstract class SendMetricListener<T> extends NotifyOnceListener<T> {
private abstract class SendMetricListener extends NotifyOnceListener<Void> {
private final long messageSize;
private SendMetricListener(long messageSize) {
@ -1707,31 +1694,34 @@ public abstract class TcpTransport<Channel extends TcpChannel> extends AbstractL
}
@Override
protected final void innerOnResponse(T object) {
protected final void innerOnResponse(Void object) {
transmittedBytesMetric.inc(messageSize);
innerInnerOnResponse(object);
}
protected abstract void innerInnerOnResponse(T object);
protected abstract void innerInnerOnResponse(Void object);
}
private final class SendListener extends SendMetricListener<Channel> {
private final class SendListener extends SendMetricListener {
private final TcpChannel channel;
private final Releasable optionalReleasable;
private final Runnable transportAdaptorCallback;
private SendListener(Releasable optionalReleasable, Runnable transportAdaptorCallback, long messageLength) {
private SendListener(TcpChannel channel, Releasable optionalReleasable, Runnable transportAdaptorCallback, long messageLength) {
super(messageLength);
this.channel = channel;
this.optionalReleasable = optionalReleasable;
this.transportAdaptorCallback = transportAdaptorCallback;
}
@Override
protected void innerInnerOnResponse(Channel channel) {
protected void innerInnerOnResponse(Void v) {
release();
}
@Override
protected void innerOnFailure(Exception e) {
logger.warn(() -> new ParameterizedMessage("send message failed [channel: {}]", channel), e);
release();
}
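The net effect of these hunks is that per-channel concerns (local address, message sends, close notifications) now live on TcpChannel itself, which is what lets the Channel type parameter disappear from TcpTransport and its listeners carry Void. A before/after sketch of the send path, using only methods visible in this diff; the helper class and method are illustrative.

-------------------------------------------------
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.transport.TcpChannel;

class SendPathSketch { // illustrative helper
    // Before: a concrete transport implemented
    //   sendMessage(Channel, BytesReference, ActionListener<Channel>) and
    //   getLocalAddress(Channel).
    // After: both operations belong to the channel, and send/close listeners
    //   are plain ActionListener<Void>.
    static void send(TcpChannel channel, BytesReference message, ActionListener<Void> listener) {
        channel.sendMessage(message, listener);
    }
}
-------------------------------------------------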

View File

@ -23,8 +23,8 @@ import org.elasticsearch.Version;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;
public final class TcpTransportChannel<Channel extends TcpChannel> implements TransportChannel {
private final TcpTransport<Channel> transport;
public final class TcpTransportChannel implements TransportChannel {
private final TcpTransport transport;
private final Version version;
private final String action;
private final long requestId;
@ -32,9 +32,9 @@ public final class TcpTransportChannel<Channel extends TcpChannel> implements Tr
private final long reservedBytes;
private final AtomicBoolean released = new AtomicBoolean();
private final String channelType;
private final Channel channel;
private final TcpChannel channel;
TcpTransportChannel(TcpTransport<Channel> transport, Channel channel, String channelType, String action,
TcpTransportChannel(TcpTransport transport, TcpChannel channel, String channelType, String action,
long requestId, Version version, String profileName, long reservedBytes) {
this.version = version;
this.channel = channel;
@ -97,7 +97,7 @@ public final class TcpTransportChannel<Channel extends TcpChannel> implements Tr
return version;
}
public Channel getChannel() {
public TcpChannel getChannel() {
return channel;
}
}


@ -30,7 +30,7 @@ public class BuildTests extends ESTestCase {
/** Asking for the jar metadata should not throw exception in tests, no matter how configured */
public void testJarMetadata() throws IOException {
URL url = Build.getElasticsearchCodebase();
URL url = Build.getElasticsearchCodeSourceLocation();
// throws exception if does not exist, or we cannot access it
try (InputStream ignored = FileSystemUtils.openFileURLStream(url)) {}
// these should never be null


@ -337,7 +337,7 @@ public class VersionTests extends ESTestCase {
assertTrue(isCompatible(Version.V_5_6_0, Version.V_6_0_0_alpha2));
assertFalse(isCompatible(Version.fromId(2000099), Version.V_6_0_0_alpha2));
assertFalse(isCompatible(Version.fromId(2000099), Version.V_5_0_0));
assertTrue(isCompatible(Version.fromString("6.1.0"), Version.fromString("7.0.0")));
assertFalse(isCompatible(Version.fromString("6.0.0"), Version.fromString("7.0.0")));
assertFalse(isCompatible(Version.fromString("6.0.0-alpha1"), Version.fromString("7.0.0")));
assertFalse("only compatible with the latest minor",
isCompatible(VersionUtils.getPreviousMinorVersion(), Version.fromString("7.0.0")));


@ -292,7 +292,7 @@ public class BulkRequestTests extends ESTestCase {
builder.field("_index", "index");
builder.field("_type", "type");
builder.field("_id", "id");
builder.field("_version", 1L);
builder.field("version", 1L);
builder.endObject();
builder.endObject();
}
@ -301,7 +301,7 @@ public class BulkRequestTests extends ESTestCase {
builder.startObject();
builder.field("doc", "{}");
Map<String,Object> values = new HashMap<>();
values.put("_version", 2L);
values.put("version", 2L);
values.put("_index", "index");
values.put("_type", "type");
builder.field("upsert", values);


@ -37,7 +37,7 @@ public class AddFileKeyStoreCommandTests extends KeyStoreCommandTestCase {
protected Command newCommand() {
return new AddFileKeyStoreCommand() {
@Override
protected Environment createEnv(Terminal terminal, Map<String, String> settings) throws UserException {
protected Environment createEnv(Map<String, String> settings) throws UserException {
return env;
}
};


@ -39,7 +39,7 @@ public class AddStringKeyStoreCommandTests extends KeyStoreCommandTestCase {
protected Command newCommand() {
return new AddStringKeyStoreCommand() {
@Override
protected Environment createEnv(Terminal terminal, Map<String, String> settings) throws UserException {
protected Environment createEnv(Map<String, String> settings) throws UserException {
return env;
}
@Override


@ -35,7 +35,7 @@ public class CreateKeyStoreCommandTests extends KeyStoreCommandTestCase {
protected Command newCommand() {
return new CreateKeyStoreCommand() {
@Override
protected Environment createEnv(Terminal terminal, Map<String, String> settings) throws UserException {
protected Environment createEnv(Map<String, String> settings) throws UserException {
return env;
}
};


@ -35,7 +35,7 @@ public class ListKeyStoreCommandTests extends KeyStoreCommandTestCase {
protected Command newCommand() {
return new ListKeyStoreCommand() {
@Override
protected Environment createEnv(Terminal terminal, Map<String, String> settings) throws UserException {
protected Environment createEnv(Map<String, String> settings) throws UserException {
return env;
}
};


@ -36,7 +36,7 @@ public class RemoveSettingKeyStoreCommandTests extends KeyStoreCommandTestCase {
protected Command newCommand() {
return new RemoveSettingKeyStoreCommand() {
@Override
protected Environment createEnv(Terminal terminal, Map<String, String> settings) throws UserException {
protected Environment createEnv(Map<String, String> settings) throws UserException {
return env;
}
};


@ -87,16 +87,6 @@ public class SettingsTests extends ESTestCase {
assertThat(implicitEnvSettings.get("setting1"), equalTo(hostname));
}
public void testReplacePropertiesPlaceholderIgnoresPrompt() {
Settings settings = Settings.builder()
.put("setting1", "${prompt.text}")
.put("setting2", "${prompt.secret}")
.replacePropertyPlaceholders()
.build();
assertThat(settings.get("setting1"), is("${prompt.text}"));
assertThat(settings.get("setting2"), is("${prompt.secret}"));
}
public void testGetAsSettings() {
Settings settings = Settings.builder()
.put("bar", "hello world")


@ -67,7 +67,7 @@ import static org.hamcrest.Matchers.instanceOf;
public class MoreLikeThisQueryBuilderTests extends AbstractQueryTestCase<MoreLikeThisQueryBuilder> {
private static final String[] SHUFFLE_PROTECTED_FIELDS = new String[]{Item.Field.DOC.getPreferredName()};
private static final String[] SHUFFLE_PROTECTED_FIELDS = new String[]{MoreLikeThisQueryBuilder.DOC.getPreferredName()};
private static String[] randomFields;
private static Item[] randomLikeItems;
@ -222,7 +222,7 @@ public class MoreLikeThisQueryBuilderTests extends AbstractQueryTestCase<MoreLik
@Override
protected Set<String> getObjectsHoldingArbitraryContent() {
//doc contains arbitrary content, anything can be added to it and no exception will be thrown
return Collections.singleton(MoreLikeThisQueryBuilder.Item.Field.DOC.getPreferredName());
return Collections.singleton(MoreLikeThisQueryBuilder.DOC.getPreferredName());
}
@Override


@ -32,8 +32,10 @@ import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.PointRangeQuery;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.Fuzziness;
import org.elasticsearch.index.query.MultiMatchQueryBuilder.Type;
import org.elasticsearch.index.search.MatchQuery;
@ -41,6 +43,7 @@ import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.AbstractQueryTestCase;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -66,18 +69,28 @@ public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase<MultiMatc
assumeTrue("test with date fields runs only when at least a type is registered", getCurrentTypes().length > 0);
}
// creates the query with random value and field name
Object value;
final Object value;
if (fieldName.equals(STRING_FIELD_NAME)) {
value = getRandomQueryText();
} else {
value = getRandomValueForFieldName(fieldName);
}
MultiMatchQueryBuilder query = new MultiMatchQueryBuilder(value, fieldName);
// field with random boost
if (randomBoolean()) {
query.field(fieldName, randomFloat() * 10);
final MultiMatchQueryBuilder query;
if (rarely()) {
query = new MultiMatchQueryBuilder(value, fieldName);
if (randomBoolean()) {
query.lenient(randomBoolean());
}
// field with random boost
if (randomBoolean()) {
query.field(fieldName, randomFloat() * 10);
}
} else {
query = new MultiMatchQueryBuilder(value);
query.lenient(true);
}
// sets other parameters of the multi match query
if (randomBoolean()) {
query.type(randomFrom(MultiMatchQueryBuilder.Type.values()));
@ -112,9 +125,6 @@ public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase<MultiMatc
if (randomBoolean()) {
query.tieBreaker(randomFloat());
}
if (randomBoolean()) {
query.lenient(randomBoolean());
}
if (randomBoolean()) {
query.cutoffFrequency((float) 10 / randomIntBetween(1, 100));
}
@ -338,4 +348,56 @@ public class MultiMatchQueryBuilderTests extends AbstractQueryTestCase<MultiMatc
assertEquals(expected, query);
}
public void testDefaultField() throws Exception {
assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
QueryShardContext context = createShardContext();
MultiMatchQueryBuilder builder = new MultiMatchQueryBuilder("hello");
// should pass because we set lenient to true when default field is `*`
Query query = builder.toQuery(context);
assertThat(query, instanceOf(DisjunctionMaxQuery.class));
context.getIndexSettings().updateIndexMetaData(
newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field",
STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5").build())
);
MultiMatchQueryBuilder qb = new MultiMatchQueryBuilder("hello");
query = qb.toQuery(context);
DisjunctionMaxQuery expected = new DisjunctionMaxQuery(
Arrays.asList(
new TermQuery(new Term(STRING_FIELD_NAME, "hello")),
new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f)
), 0.0f
);
assertEquals(expected, query);
context.getIndexSettings().updateIndexMetaData(
newIndexMeta("index", context.getIndexSettings().getSettings(), Settings.builder().putList("index.query.default_field",
STRING_FIELD_NAME, STRING_FIELD_NAME_2 + "^5", INT_FIELD_NAME).build())
);
// should fail because lenient defaults to false
IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, () -> qb.toQuery(context));
assertThat(exc, instanceOf(NumberFormatException.class));
assertThat(exc.getMessage(), equalTo("For input string: \"hello\""));
// explicitly sets lenient
qb.lenient(true);
query = qb.toQuery(context);
expected = new DisjunctionMaxQuery(
Arrays.asList(
new TermQuery(new Term(STRING_FIELD_NAME, "hello")),
new BoostQuery(new TermQuery(new Term(STRING_FIELD_NAME_2, "hello")), 5.0f),
new MatchNoDocsQuery("failed [mapped_int] query, caused by number_format_exception:[For input string: \"hello\"]")
), 0.0f
);
assertEquals(expected, query);
}
private static IndexMetaData newIndexMeta(String name, Settings oldIndexSettings, Settings indexSettings) {
Settings build = Settings.builder().put(oldIndexSettings)
.put(indexSettings)
.build();
return IndexMetaData.builder(name).settings(build).build();
}
}


@ -67,7 +67,7 @@ public class InternalSettingsPreparerTests extends ESTestCase {
assertNotNull(settings.get(ClusterName.CLUSTER_NAME_SETTING.getKey())); // a cluster name was set
int size = settings.names().size();
Environment env = InternalSettingsPreparer.prepareEnvironment(baseEnvSettings, null);
Environment env = InternalSettingsPreparer.prepareEnvironment(baseEnvSettings);
settings = env.settings();
assertNull(settings.get("node.name")); // a name was not set
assertNotNull(settings.get(ClusterName.CLUSTER_NAME_SETTING.getKey())); // a cluster name was set
@ -84,57 +84,6 @@ public class InternalSettingsPreparerTests extends ESTestCase {
assertEquals("foobar", settings.get("cluster.name"));
}
public void testReplacePromptPlaceholders() {
MockTerminal terminal = new MockTerminal();
terminal.addTextInput("text");
terminal.addSecretInput("replaced");
Settings.Builder builder = Settings.builder()
.put(baseEnvSettings)
.put("password.replace", InternalSettingsPreparer.SECRET_PROMPT_VALUE)
.put("dont.replace", "prompt:secret")
.put("dont.replace2", "_prompt:secret_")
.put("dont.replace3", "_prompt:text__")
.put("dont.replace4", "__prompt:text_")
.put("dont.replace5", "prompt:secret__")
.put("replace_me", InternalSettingsPreparer.TEXT_PROMPT_VALUE);
Settings settings = InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal).settings();
assertThat(settings.get("password.replace"), equalTo("replaced"));
assertThat(settings.get("replace_me"), equalTo("text"));
// verify other values unchanged
assertThat(settings.get("dont.replace"), equalTo("prompt:secret"));
assertThat(settings.get("dont.replace2"), equalTo("_prompt:secret_"));
assertThat(settings.get("dont.replace3"), equalTo("_prompt:text__"));
assertThat(settings.get("dont.replace4"), equalTo("__prompt:text_"));
assertThat(settings.get("dont.replace5"), equalTo("prompt:secret__"));
}
public void testReplaceSecretPromptPlaceholderWithNullTerminal() {
Settings.Builder builder = Settings.builder()
.put(baseEnvSettings)
.put("replace_me1", InternalSettingsPreparer.SECRET_PROMPT_VALUE);
try {
InternalSettingsPreparer.prepareEnvironment(builder.build(), null);
fail("an exception should have been thrown since no terminal was provided!");
} catch (UnsupportedOperationException e) {
assertThat(e.getMessage(), containsString("with value [" + InternalSettingsPreparer.SECRET_PROMPT_VALUE + "]"));
}
}
public void testReplaceTextPromptPlaceholderWithNullTerminal() {
Settings.Builder builder = Settings.builder()
.put(baseEnvSettings)
.put("replace_me1", InternalSettingsPreparer.TEXT_PROMPT_VALUE);
try {
InternalSettingsPreparer.prepareEnvironment(builder.build(), null);
fail("an exception should have been thrown since no terminal was provided!");
} catch (UnsupportedOperationException e) {
assertThat(e.getMessage(), containsString("with value [" + InternalSettingsPreparer.TEXT_PROMPT_VALUE + "]"));
}
}
public void testGarbageIsNotSwallowed() throws IOException {
try {
InputStream garbage = getClass().getResourceAsStream("/config/garbage/garbage.yml");
@ -144,7 +93,7 @@ public class InternalSettingsPreparerTests extends ESTestCase {
Files.copy(garbage, config.resolve("elasticsearch.yml"));
InternalSettingsPreparer.prepareEnvironment(Settings.builder()
.put(baseEnvSettings)
.build(), null);
.build());
} catch (SettingsException e) {
assertEquals("Failed to load settings from [elasticsearch.yml]", e.getMessage());
}
@ -156,7 +105,7 @@ public class InternalSettingsPreparerTests extends ESTestCase {
Files.createDirectory(config);
Files.copy(yaml, config.resolve("elasticsearch.yaml"));
SettingsException e = expectThrows(SettingsException.class, () ->
InternalSettingsPreparer.prepareEnvironment(Settings.builder().put(baseEnvSettings).build(), null));
InternalSettingsPreparer.prepareEnvironment(Settings.builder().put(baseEnvSettings).build()));
assertEquals("elasticsearch.yaml was deprecated in 5.5.0 and must be renamed to elasticsearch.yml", e.getMessage());
}
@ -166,7 +115,7 @@ public class InternalSettingsPreparerTests extends ESTestCase {
Files.createDirectory(config);
Files.copy(yaml, config.resolve("elasticsearch.json"));
SettingsException e = expectThrows(SettingsException.class, () ->
InternalSettingsPreparer.prepareEnvironment(Settings.builder().put(baseEnvSettings).build(), null));
InternalSettingsPreparer.prepareEnvironment(Settings.builder().put(baseEnvSettings).build()));
assertEquals("elasticsearch.json was deprecated in 5.5.0 and must be converted to elasticsearch.yml", e.getMessage());
}
@ -174,14 +123,14 @@ public class InternalSettingsPreparerTests extends ESTestCase {
MockSecureSettings secureSettings = new MockSecureSettings();
secureSettings.setString("foo", "secret");
Settings input = Settings.builder().put(baseEnvSettings).setSecureSettings(secureSettings).build();
Environment env = InternalSettingsPreparer.prepareEnvironment(input, null);
Environment env = InternalSettingsPreparer.prepareEnvironment(input);
Setting<SecureString> fakeSetting = SecureSetting.secureString("foo", null);
assertEquals("secret", fakeSetting.get(env.settings()).toString());
}
public void testDefaultPropertiesDoNothing() throws Exception {
Map<String, String> props = Collections.singletonMap("default.setting", "foo");
Environment env = InternalSettingsPreparer.prepareEnvironment(baseEnvSettings, null, props, null);
Environment env = InternalSettingsPreparer.prepareEnvironment(baseEnvSettings, props, null);
assertEquals("foo", env.settings().get("default.setting"));
assertNull(env.settings().get("setting"));
}


@ -48,6 +48,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDI
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.index.query.QueryBuilders.geoIntersectionQuery;
import static org.elasticsearch.index.query.QueryBuilders.geoShapeQuery;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.test.geo.RandomShapeGenerator.createGeometryCollectionWithin;
import static org.elasticsearch.test.geo.RandomShapeGenerator.xRandomPoint;
import static org.elasticsearch.test.geo.RandomShapeGenerator.xRandomRectangle;
@ -463,9 +464,43 @@ public class GeoShapeQueryTests extends ESSingleNodeTestCase {
// test that point was inserted
SearchResponse response = client().prepareSearch("geo_points_only").setTypes("type1")
.setQuery(geoIntersectionQuery("location", shape))
.setQuery(matchAllQuery())
.execute().actionGet();
assertEquals(1, response.getHits().getTotalHits());
}
public void testPointsOnlyExplicit() throws Exception {
String mapping = XContentFactory.jsonBuilder().startObject().startObject("type1")
.startObject("properties").startObject("location")
.field("type", "geo_shape")
.field("tree", randomBoolean() ? "quadtree" : "geohash")
.field("tree_levels", "6")
.field("distance_error_pct", "0.01")
.field("points_only", true)
.endObject().endObject()
.endObject().endObject().string();
client().admin().indices().prepareCreate("geo_points_only").addMapping("type1", mapping, XContentType.JSON).execute().actionGet();
ensureGreen();
// MULTIPOINT
ShapeBuilder shape = RandomShapeGenerator.createShape(random(), RandomShapeGenerator.ShapeType.MULTIPOINT);
client().prepareIndex("geo_points_only", "type1", "1")
.setSource(jsonBuilder().startObject().field("location", shape).endObject())
.setRefreshPolicy(IMMEDIATE).get();
// POINT
shape = RandomShapeGenerator.createShape(random(), RandomShapeGenerator.ShapeType.POINT);
client().prepareIndex("geo_points_only", "type1", "2")
.setSource(jsonBuilder().startObject().field("location", shape).endObject())
.setRefreshPolicy(IMMEDIATE).get();
// test that both points were indexed
SearchResponse response = client().prepareSearch("geo_points_only").setTypes("type1")
.setQuery(matchAllQuery())
.execute().actionGet();
assertEquals(2, response.getHits().getTotalHits());
}
}


@ -0,0 +1,115 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.snapshots;
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.snapshots.mockstore.MockRepository;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.disruption.NetworkDisruption;
import org.elasticsearch.test.transport.MockTransportService;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.everyItem;
import static org.hamcrest.Matchers.hasSize;
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, transportClientRatio = 0)
public class SnapshotShardsServiceIT extends AbstractSnapshotIntegTestCase {
@Override
protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(MockRepository.Plugin.class, MockTransportService.TestPlugin.class);
}
public void testRetryPostingSnapshotStatusMessages() throws Exception {
String masterNode = internalCluster().startMasterOnlyNode();
String dataNode = internalCluster().startDataOnlyNode();
logger.info("--> creating repository");
assertAcked(client().admin().cluster().preparePutRepository("test-repo")
.setType("mock").setSettings(Settings.builder()
.put("location", randomRepoPath())
.put("compress", randomBoolean())
.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
final int shards = between(1, 10);
assertAcked(prepareCreate("test-index", 0, Settings.builder().put("number_of_shards", shards).put("number_of_replicas", 0)));
ensureGreen();
final int numDocs = scaledRandomIntBetween(50, 100);
for (int i = 0; i < numDocs; i++) {
index("test-index", "doc", Integer.toString(i));
}
logger.info("--> blocking repository");
String blockedNode = blockNodeWithIndex("test-repo", "test-index");
dataNodeClient().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
.setWaitForCompletion(false)
.setIndices("test-index")
.get();
waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
final SnapshotId snapshotId = client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap")
.get().getSnapshots().get(0).snapshotId();
logger.info("--> start disrupting cluster");
final NetworkDisruption networkDisruption = new NetworkDisruption(new NetworkDisruption.TwoPartitions(masterNode, dataNode),
NetworkDisruption.NetworkDelay.random(random()));
internalCluster().setDisruptionScheme(networkDisruption);
networkDisruption.startDisrupting();
logger.info("--> unblocking repository");
unblockNode("test-repo", blockedNode);
// Retrieve snapshot status from the data node.
SnapshotShardsService snapshotShardsService = internalCluster().getInstance(SnapshotShardsService.class, blockedNode);
assertBusy(() -> {
final Snapshot snapshot = new Snapshot("test-repo", snapshotId);
List<IndexShardSnapshotStatus.Stage> stages = snapshotShardsService.currentSnapshotShards(snapshot)
.values().stream().map(IndexShardSnapshotStatus::stage).collect(Collectors.toList());
assertThat(stages, hasSize(shards));
assertThat(stages, everyItem(equalTo(IndexShardSnapshotStatus.Stage.DONE)));
});
logger.info("--> stop disrupting cluster");
networkDisruption.stopDisrupting();
internalCluster().clearDisruptionScheme(true);
assertBusy(() -> {
GetSnapshotsResponse snapshotsStatusResponse = client().admin().cluster()
.prepareGetSnapshots("test-repo")
.setSnapshots("test-snap").get();
SnapshotInfo snapshotInfo = snapshotsStatusResponse.getSnapshots().get(0);
logger.info("Snapshot status [{}], successfulShards [{}]", snapshotInfo.state(), snapshotInfo.successfulShards());
assertThat(snapshotInfo.state(), equalTo(SnapshotState.SUCCESS));
assertThat(snapshotInfo.successfulShards(), equalTo(shards));
}, 10, TimeUnit.SECONDS);
}
}


@ -39,7 +39,6 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import static org.hamcrest.Matchers.equalTo;
@ -172,18 +171,13 @@ public class TcpTransportTests extends ESTestCase {
public void testCompressRequest() throws IOException {
final boolean compressed = randomBoolean();
final AtomicBoolean called = new AtomicBoolean(false);
Req request = new Req(randomRealisticUnicodeOfLengthBetween(10, 100));
ThreadPool threadPool = new TestThreadPool(TcpTransportTests.class.getName());
AtomicReference<IOException> exceptionReference = new AtomicReference<>();
AtomicReference<BytesReference> messageCaptor = new AtomicReference<>();
try {
TcpTransport<FakeChannel> transport = new TcpTransport<FakeChannel>(
TcpTransport transport = new TcpTransport(
"test", Settings.builder().put("transport.tcp.compress", compressed).build(), threadPool,
new BigArrays(Settings.EMPTY, null), null, null, null) {
@Override
protected InetSocketAddress getLocalAddress(FakeChannel o) {
return null;
}
@Override
protected FakeChannel bind(String name, InetSocketAddress address) throws IOException {
@ -191,38 +185,9 @@ public class TcpTransportTests extends ESTestCase {
}
@Override
protected void sendMessage(FakeChannel o, BytesReference reference, ActionListener listener) {
try {
StreamInput streamIn = reference.streamInput();
streamIn.skip(TcpHeader.MARKER_BYTES_SIZE);
int len = streamIn.readInt();
long requestId = streamIn.readLong();
assertEquals(42, requestId);
byte status = streamIn.readByte();
Version version = Version.fromId(streamIn.readInt());
assertEquals(Version.CURRENT, version);
assertEquals(compressed, TransportStatus.isCompress(status));
called.compareAndSet(false, true);
if (compressed) {
final int bytesConsumed = TcpHeader.HEADER_SIZE;
streamIn = CompressorFactory.compressor(reference.slice(bytesConsumed, reference.length() - bytesConsumed))
.streamInput(streamIn);
}
threadPool.getThreadContext().readHeaders(streamIn);
assertEquals("foobar", streamIn.readString());
Req readReq = new Req("");
readReq.readFrom(streamIn);
assertEquals(request.value, readReq.value);
} catch (IOException e) {
exceptionReference.set(e);
}
}
@Override
protected FakeChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout,
ActionListener<FakeChannel> connectListener) throws IOException {
FakeChannel fakeChannel = new FakeChannel();
return fakeChannel;
protected FakeChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener<Void> connectListener)
throws IOException {
return new FakeChannel(messageCaptor);
}
@Override
@ -233,18 +198,41 @@ public class TcpTransportTests extends ESTestCase {
@Override
public NodeChannels getConnection(DiscoveryNode node) {
int numConnections = MockTcpTransport.LIGHT_PROFILE.getNumConnections();
ArrayList<FakeChannel> fakeChannels = new ArrayList<>(numConnections);
ArrayList<TcpChannel> fakeChannels = new ArrayList<>(numConnections);
for (int i = 0; i < numConnections; ++i) {
fakeChannels.add(new FakeChannel());
fakeChannels.add(new FakeChannel(messageCaptor));
}
return new NodeChannels(node, fakeChannels, MockTcpTransport.LIGHT_PROFILE, Version.CURRENT);
}
};
DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), Version.CURRENT);
Transport.Connection connection = transport.getConnection(node);
connection.sendRequest(42, "foobar", request, TransportRequestOptions.EMPTY);
assertTrue(called.get());
assertNull("IOException while sending message.", exceptionReference.get());
BytesReference reference = messageCaptor.get();
assertNotNull(reference);
StreamInput streamIn = reference.streamInput();
streamIn.skip(TcpHeader.MARKER_BYTES_SIZE);
int len = streamIn.readInt();
long requestId = streamIn.readLong();
assertEquals(42, requestId);
byte status = streamIn.readByte();
Version version = Version.fromId(streamIn.readInt());
assertEquals(Version.CURRENT, version);
assertEquals(compressed, TransportStatus.isCompress(status));
if (compressed) {
final int bytesConsumed = TcpHeader.HEADER_SIZE;
streamIn = CompressorFactory.compressor(reference.slice(bytesConsumed, reference.length() - bytesConsumed))
.streamInput(streamIn);
}
threadPool.getThreadContext().readHeaders(streamIn);
assertEquals("foobar", streamIn.readString());
Req readReq = new Req("");
readReq.readFrom(streamIn);
assertEquals(request.value, readReq.value);
} finally {
ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
}
@ -252,12 +240,18 @@ public class TcpTransportTests extends ESTestCase {
private static final class FakeChannel implements TcpChannel {
private final AtomicReference<BytesReference> messageCaptor;
FakeChannel(AtomicReference<BytesReference> messageCaptor) {
this.messageCaptor = messageCaptor;
}
@Override
public void close() {
}
@Override
public void addCloseListener(ActionListener<TcpChannel> listener) {
public void addCloseListener(ActionListener<Void> listener) {
}
@Override
@ -268,6 +262,16 @@ public class TcpTransportTests extends ESTestCase {
public boolean isOpen() {
return false;
}
@Override
public InetSocketAddress getLocalAddress() {
return null;
}
@Override
public void sendMessage(BytesReference reference, ActionListener<Void> listener) {
messageCaptor.set(reference);
}
}
private static final class Req extends TransportRequest {


@ -1,4 +1,4 @@
{ "update" : {"_id" : "1", "_retry_on_conflict" : 2} }
{ "update" : {"_id" : "1", "retry_on_conflict" : 2} }
{ "doc" : {"field" : "value"} }
{ "update" : { "_id" : "0", "_type" : "type1", "_index" : "index1" } }
{ "script" : { "source" : "counter += param1", "lang" : "javascript", "params" : {"param1" : 1}}, "upsert" : {"counter" : 1}}


@ -21,6 +21,7 @@ apply plugin: 'elasticsearch.build'
dependencies {
provided "org.elasticsearch:elasticsearch:${version}"
provided "org.elasticsearch:elasticsearch-cli:${version}"
testCompile "org.elasticsearch.test:framework:${version}"
testCompile 'com.google.jimfs:jimfs:1.1'
testCompile 'com.google.guava:guava:18.0'


@ -69,7 +69,7 @@ public class ListPluginsCommandTests extends ESTestCase {
MockTerminal terminal = new MockTerminal();
int status = new ListPluginsCommand() {
@Override
protected Environment createEnv(Terminal terminal, Map<String, String> settings) throws UserException {
protected Environment createEnv(Map<String, String> settings) throws UserException {
Settings.Builder builder = Settings.builder().put("path.home", home);
settings.forEach((k,v) -> builder.put(k, v));
final Settings realSettings = builder.build();


@ -57,7 +57,7 @@ public class RemovePluginCommandTests extends ESTestCase {
}
@Override
protected Environment createEnv(Terminal terminal, Map<String, String> settings) throws UserException {
protected Environment createEnv(Map<String, String> settings) throws UserException {
return env;
}


@ -59,23 +59,28 @@ POST /sales/docs/_bulk?refresh
For instance the following document:
```
[source,js]
--------------------------------------------------
{
"keyword": ["foo", "bar"],
"number": [23, 65, 76]
}
```
--------------------------------------------------
// NOTCONSOLE
\... creates the following composite buckets when `keyword` and `number` are used as values source
for the aggregation:
```
[source,js]
--------------------------------------------------
{ "keyword": "foo", "number": 23 }
{ "keyword": "foo", "number": 65 }
{ "keyword": "foo", "number": 76 }
{ "keyword": "bar", "number": 23 }
{ "keyword": "bar", "number": 65 }
{ "keyword": "bar", "number": 76 }
```
--------------------------------------------------
// NOTCONSOLE
==== Values source


@ -201,16 +201,16 @@ chunks, as this will slow things down.
=== Versioning
Each bulk item can include the version value using the
`_version`/`version` field. It automatically follows the behavior of the
`version` field. It automatically follows the behavior of the
index / delete operation based on the `_version` mapping. It also
support the `version_type`/`_version_type` (see <<index-versioning, versioning>>)
supports the `version_type` (see <<index-versioning, versioning>>)
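For example, a single bulk index action can carry the version on its action line. This is a minimal sketch; the index, type, id, and values are illustrative placeholders:

[source,js]
--------------------------------------------------
POST _bulk
{ "index" : { "_index" : "index1", "_type" : "type1", "_id" : "1", "version" : 2, "version_type" : "external" } }
{ "field" : "value" }
--------------------------------------------------
// NOTCONSOLE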
[float]
[[bulk-routing]]
=== Routing
Each bulk item can include the routing value using the
`_routing`/`routing` field. It automatically follows the behavior of the
`routing` field. It automatically follows the behavior of the
index / delete operation based on the `_routing` mapping.
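For example, a bulk delete action can specify its routing on the action line. This is a sketch with placeholder names:

[source,js]
--------------------------------------------------
POST _bulk
{ "delete" : { "_index" : "index1", "_type" : "type1", "_id" : "2", "routing" : "user1" } }
--------------------------------------------------
// NOTCONSOLE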
[float]
@ -234,7 +234,7 @@ Control when the changes made by this request are visible to search. See
[[bulk-update]]
=== Update
When using `update` action `_retry_on_conflict` can be used as field in
When using `update` action `retry_on_conflict` can be used as field in
the action itself (not in the extra payload line), to specify how many
times an update should be retried in the case of a version conflict.
@ -246,11 +246,11 @@ the options. Example with update actions:
[source,js]
--------------------------------------------------
POST _bulk
{ "update" : {"_id" : "1", "_type" : "type1", "_index" : "index1", "_retry_on_conflict" : 3} }
{ "update" : {"_id" : "1", "_type" : "type1", "_index" : "index1", "retry_on_conflict" : 3} }
{ "doc" : {"field" : "value"} }
{ "update" : { "_id" : "0", "_type" : "type1", "_index" : "index1", "_retry_on_conflict" : 3} }
{ "update" : { "_id" : "0", "_type" : "type1", "_index" : "index1", "retry_on_conflict" : 3} }
{ "script" : { "source": "ctx._source.counter += params.param1", "lang" : "painless", "params" : {"param1" : 1}}, "upsert" : {"counter" : 1}}
{ "update" : {"_id" : "2", "_type" : "type1", "_index" : "index1", "_retry_on_conflict" : 3} }
{ "update" : {"_id" : "2", "_type" : "type1", "_index" : "index1", "retry_on_conflict" : 3} }
{ "doc" : {"field" : "value"}, "doc_as_upsert" : true }
{ "update" : {"_id" : "3", "_type" : "type1", "_index" : "index1", "_source" : true} }
{ "doc" : {"field" : "value"} }


@ -59,7 +59,7 @@ Example to delete with routing
[source,js]
--------------------------------------------------
PUT /twitter/tweet/1?routing=kimhcy
PUT /twitter/tweet/1?routing=kimchy
{
"test": "test"
}

View File

@ -230,7 +230,7 @@ GET /_mget?routing=key1
"_index" : "test",
"_type" : "type",
"_id" : "1",
"_routing" : "key2"
"routing" : "key2"
},
{
"_index" : "test",

View File

@ -64,7 +64,7 @@ A type used to be a logical category/partition of your index to allow you to sto
[float]
=== Document
A document is a basic unit of information that can be indexed. For example, you can have a document for a single customer, another document for a single product, and yet another for a single order. This document is expressed in http://json.org/[JSON] (JavaScript Object Notation) which is an ubiquitous internet data interchange format.
A document is a basic unit of information that can be indexed. For example, you can have a document for a single customer, another document for a single product, and yet another for a single order. This document is expressed in http://json.org/[JSON] (JavaScript Object Notation) which is a ubiquitous internet data interchange format.
Within an index/type, you can store as many documents as you want. Note that although a document physically resides in an index, a document actually must be indexed/assigned to a type inside an index.


@ -30,6 +30,8 @@ way to reindex old indices is to use the `reindex` API.
* <<breaking_70_mappings_changes>>
* <<breaking_70_search_changes>>
* <<breaking_70_plugins_changes>>
* <<breaking_70_api_changes>>
include::migrate_7_0/aggregations.asciidoc[]
include::migrate_7_0/cluster.asciidoc[]
@ -37,3 +39,5 @@ include::migrate_7_0/indices.asciidoc[]
include::migrate_7_0/mappings.asciidoc[]
include::migrate_7_0/search.asciidoc[]
include::migrate_7_0/plugins.asciidoc[]
include::migrate_7_0/api.asciidoc[]


@ -0,0 +1,24 @@
[[breaking_70_api_changes]]
=== Breaking changes in 7.0
==== Camel case and underscore parameters deprecated in 6.x have been removed
A number of duplicate parameters deprecated in 6.x have been removed from
the Bulk, Multi Get, Term Vectors, and More Like This Query requests.
The following camel case parameters have been removed:
* `opType`
* `versionType`, `_versionType`
The following parameters starting with underscore have been removed:
* `_parent`
* `_retry_on_conflict`
* `_routing`
* `_version`
* `_version_type`
Instead of these removed parameters, use the equivalent parameters without camel case and
without the leading underscore, e.g. use `version_type` instead of `_version_type` or `versionType`.
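For instance, a bulk update action written with the remaining parameter names could look like the following sketch (the index, type, id, and values are placeholders):

[source,js]
--------------------------------------------------
POST _bulk
{ "update" : { "_index" : "index1", "_type" : "type1", "_id" : "1", "retry_on_conflict" : 3, "routing" : "user1" } }
{ "doc" : { "field" : "value" } }
--------------------------------------------------
// NOTCONSOLE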


@ -58,6 +58,11 @@ GET /_search
<1> The `subject` field is three times as important as the `message` field.
If no `fields` are provided, the `multi_match` query defaults to the `index.query.default_field`
index setting, which in turn defaults to `*`. `*` extracts all fields in the mapping that
are eligible for term queries and filters out the metadata fields. All extracted fields are then
combined to build the query.
WARNING: There is a limit of no more than 1024 fields being queried at once.
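As a sketch of this behavior (the query text is arbitrary), the following `multi_match` query without explicit `fields` is expanded over all eligible fields; with the default `*` setting, leniency is enabled so that non-text fields do not cause the whole query to fail:

[source,js]
--------------------------------------------------
GET /_search
{
  "query": {
    "multi_match" : {
      "query" : "this is a test"
    }
  }
}
--------------------------------------------------
// NOTCONSOLE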
[[multi-match-types]]


@ -78,30 +78,3 @@ variable, for instance:
node.name: ${HOSTNAME}
network.host: ${ES_NETWORK_HOST}
--------------------------------------------------
[float]
=== Prompting for settings
For settings that you do not wish to store in the configuration file, you can
use the value `${prompt.text}` or `${prompt.secret}` and start Elasticsearch
in the foreground. `${prompt.secret}` has echoing disabled so that the value
entered will not be shown in your terminal; `${prompt.text}` will allow you to
see the value as you type it in. For example:
[source,yaml]
--------------------------------------------------
node:
name: ${prompt.text}
--------------------------------------------------
When starting Elasticsearch, you will be prompted to enter the actual value
like so:
[source,sh]
--------------------------------------------------
Enter value for [node.name]:
--------------------------------------------------
NOTE: Elasticsearch will not start if `${prompt.text}` or `${prompt.secret}`
is used in the settings and the process is run as a service or in the background.

View File

@ -42,7 +42,6 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.lease.Releasables;
@ -57,6 +56,7 @@ import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TcpChannel;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.TransportRequestOptions;
@ -79,7 +79,7 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF
* longer. Med is for the typical search / single doc index. And High for things like cluster state. Ping is reserved for
* sending out ping requests to other nodes.
*/
public class Netty4Transport extends TcpTransport<NettyTcpChannel> {
public class Netty4Transport extends TcpTransport {
static {
Netty4Utils.setup();
@ -249,7 +249,7 @@ public class Netty4Transport extends TcpTransport<NettyTcpChannel> {
}
@Override
protected NettyTcpChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener<NettyTcpChannel> listener)
protected NettyTcpChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener<Void> listener)
throws IOException {
ChannelFuture channelFuture = bootstrap.connect(node.getAddress().address());
Channel channel = channelFuture.channel();
@ -264,7 +264,7 @@ public class Netty4Transport extends TcpTransport<NettyTcpChannel> {
channelFuture.addListener(f -> {
if (f.isSuccess()) {
listener.onResponse(nettyChannel);
listener.onResponse(null);
} else {
Throwable cause = f.cause();
if (cause instanceof Error) {
@ -279,28 +279,6 @@ public class Netty4Transport extends TcpTransport<NettyTcpChannel> {
return nettyChannel;
}
@Override
protected void sendMessage(NettyTcpChannel channel, BytesReference reference, ActionListener<NettyTcpChannel> listener) {
final ChannelFuture future = channel.getLowLevelChannel().writeAndFlush(Netty4Utils.toByteBuf(reference));
future.addListener(f -> {
if (f.isSuccess()) {
listener.onResponse(channel);
} else {
final Throwable cause = f.cause();
Netty4Utils.maybeDie(cause);
logger.warn((Supplier<?>) () ->
new ParameterizedMessage("write and flush on the network layer failed (channel: {})", channel), cause);
assert cause instanceof Exception;
listener.onFailure((Exception) cause);
}
});
}
@Override
protected InetSocketAddress getLocalAddress(NettyTcpChannel channel) {
return (InetSocketAddress) channel.getLowLevelChannel().localAddress();
}
@Override
protected NettyTcpChannel bind(String name, InetSocketAddress address) {
Channel channel = serverBootstraps.get(name).bind(address).syncUninterruptibly().channel();


@ -20,22 +20,27 @@
package org.elasticsearch.transport.netty4;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelOption;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.transport.TcpChannel;
import java.net.InetSocketAddress;
import java.util.concurrent.CompletableFuture;
public class NettyTcpChannel implements TcpChannel {
private final Channel channel;
private final CompletableFuture<TcpChannel> closeContext = new CompletableFuture<>();
private final CompletableFuture<Void> closeContext = new CompletableFuture<>();
NettyTcpChannel(Channel channel) {
this.channel = channel;
this.channel.closeFuture().addListener(f -> {
if (f.isSuccess()) {
closeContext.complete(this);
closeContext.complete(null);
} else {
Throwable cause = f.cause();
if (cause instanceof Error) {
@ -48,17 +53,13 @@ public class NettyTcpChannel implements TcpChannel {
});
}
public Channel getLowLevelChannel() {
return channel;
}
@Override
public void close() {
channel.close();
}
@Override
public void addCloseListener(ActionListener<TcpChannel> listener) {
public void addCloseListener(ActionListener<Void> listener) {
closeContext.whenComplete(ActionListener.toBiConsumer(listener));
}
@ -71,4 +72,28 @@ public class NettyTcpChannel implements TcpChannel {
public boolean isOpen() {
return channel.isOpen();
}
@Override
public InetSocketAddress getLocalAddress() {
return (InetSocketAddress) channel.localAddress();
}
@Override
public void sendMessage(BytesReference reference, ActionListener<Void> listener) {
final ChannelFuture future = channel.writeAndFlush(Netty4Utils.toByteBuf(reference));
future.addListener(f -> {
if (f.isSuccess()) {
listener.onResponse(null);
} else {
final Throwable cause = f.cause();
Netty4Utils.maybeDie(cause);
assert cause instanceof Exception;
listener.onFailure((Exception) cause);
}
});
}
public Channel getLowLevelChannel() {
return channel;
}
}


@ -36,6 +36,7 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TcpChannel;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.Transport;
@ -108,7 +109,8 @@ public class Netty4TransportIT extends ESNetty4IntegTestCase {
super(settings, threadPool, networkService, bigArrays, namedWriteableRegistry, circuitBreakerService);
}
protected String handleRequest(NettyTcpChannel channel, String profileName,
@Override
protected String handleRequest(TcpChannel channel, String profileName,
StreamInput stream, long requestId, int messageLengthBytes, Version version,
InetSocketAddress remoteAddress, byte status) throws IOException {
String action = super.handleRequest(channel, profileName, stream, requestId, messageLengthBytes, version,


@ -30,7 +30,6 @@ import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.TransportService;
import org.junit.Before;
import java.util.Collections;
@ -59,7 +58,7 @@ public class NettyTransportMultiPortTests extends ESTestCase {
.build();
ThreadPool threadPool = new TestThreadPool("tst");
try (TcpTransport<?> transport = startTransport(settings, threadPool)) {
try (TcpTransport transport = startTransport(settings, threadPool)) {
assertEquals(1, transport.profileBoundAddresses().size());
assertEquals(1, transport.boundAddress().boundAddresses().length);
} finally {
@ -75,7 +74,7 @@ public class NettyTransportMultiPortTests extends ESTestCase {
.build();
ThreadPool threadPool = new TestThreadPool("tst");
try (TcpTransport<?> transport = startTransport(settings, threadPool)) {
try (TcpTransport transport = startTransport(settings, threadPool)) {
assertEquals(1, transport.profileBoundAddresses().size());
assertEquals(1, transport.boundAddress().boundAddresses().length);
} finally {
@ -108,7 +107,7 @@ public class NettyTransportMultiPortTests extends ESTestCase {
.build();
ThreadPool threadPool = new TestThreadPool("tst");
try (TcpTransport<?> transport = startTransport(settings, threadPool)) {
try (TcpTransport transport = startTransport(settings, threadPool)) {
assertEquals(0, transport.profileBoundAddresses().size());
assertEquals(1, transport.boundAddress().boundAddresses().length);
} finally {
@ -116,9 +115,9 @@ public class NettyTransportMultiPortTests extends ESTestCase {
}
}
private TcpTransport<?> startTransport(Settings settings, ThreadPool threadPool) {
private TcpTransport startTransport(Settings settings, ThreadPool threadPool) {
BigArrays bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
TcpTransport<?> transport = new Netty4Transport(settings, threadPool, new NetworkService(Collections.emptyList()),
TcpTransport transport = new Netty4Transport(settings, threadPool, new NetworkService(Collections.emptyList()),
bigArrays, new NamedWriteableRegistry(Collections.emptyList()), new NoneCircuitBreakerService());
transport.start();


@ -58,7 +58,7 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase
BigArrays.NON_RECYCLING_INSTANCE, namedWriteableRegistry, new NoneCircuitBreakerService()) {
@Override
protected Version executeHandshake(DiscoveryNode node, NettyTcpChannel channel, TimeValue timeout) throws IOException,
protected Version executeHandshake(DiscoveryNode node, TcpChannel channel, TimeValue timeout) throws IOException,
InterruptedException {
if (doHandshake) {
return super.executeHandshake(node, channel, timeout);
@ -90,7 +90,7 @@ public class SimpleNetty4TransportTests extends AbstractSimpleTransportTestCase
protected void closeConnectionChannel(Transport transport, Transport.Connection connection) throws IOException {
final Netty4Transport t = (Netty4Transport) transport;
@SuppressWarnings("unchecked")
final TcpTransport<NettyTcpChannel>.NodeChannels channels = (TcpTransport<NettyTcpChannel>.NodeChannels) connection;
final TcpTransport.NodeChannels channels = (TcpTransport.NodeChannels) connection;
TcpChannel.closeChannels(channels.getChannels().subList(0, randomIntBetween(1, channels.getChannels().size())), true);
}


@ -49,11 +49,11 @@ public class EvilCommandTests extends ESTestCase {
};
final MockTerminal terminal = new MockTerminal();
command.main(new String[0], terminal);
assertNotNull(command.shutdownHookThread.get());
assertNotNull(command.getShutdownHookThread());
// successful removal here asserts that the runtime hook was installed in Command#main
assertTrue(Runtime.getRuntime().removeShutdownHook(command.shutdownHookThread.get()));
command.shutdownHookThread.get().run();
command.shutdownHookThread.get().join();
assertTrue(Runtime.getRuntime().removeShutdownHook(command.getShutdownHookThread()));
command.getShutdownHookThread().run();
command.getShutdownHookThread().join();
assertTrue(closed.get());
final String output = terminal.getOutput();
if (shouldThrow) {


@ -57,6 +57,11 @@ for (Version version : wireCompatVersions) {
if (project.bwc_tests_enabled) {
bwcTest.dependsOn(versionBwcTest)
}
/* To support taking index snapshots, we have to set path.repo setting */
tasks.getByName("${baseName}#mixedClusterTestRunner").configure {
systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo")
}
}
test.enabled = false // no unit tests for rolling upgrades, only the rest integration test


@ -27,6 +27,7 @@ import org.elasticsearch.client.RestClient;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.seqno.SeqNoStats;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.yaml.ObjectPath;
@ -42,7 +43,9 @@ import java.util.stream.Collectors;
import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength;
import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonMap;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;
public class IndexingIT extends ESRestTestCase {
@ -237,6 +240,57 @@ public class IndexingIT extends ESRestTestCase {
}
}
public void testUpdateSnapshotStatus() throws Exception {
Nodes nodes = buildNodeAndVersions();
assertThat(nodes.getNewNodes(), not(empty()));
logger.info("cluster discovered: {}", nodes.toString());
// Create the repository before taking the snapshot.
String repoConfig = JsonXContent.contentBuilder()
.startObject()
.field("type", "fs")
.startObject("settings")
.field("compress", randomBoolean())
.field("location", System.getProperty("tests.path.repo"))
.endObject()
.endObject()
.string();
assertOK(
client().performRequest("PUT", "/_snapshot/repo", emptyMap(),
new StringEntity(repoConfig, ContentType.APPLICATION_JSON))
);
String bwcNames = nodes.getBWCNodes().stream().map(Node::getNodeName).collect(Collectors.joining(","));
// Allocating shards on the BWC nodes to make sure that the snapshot is taken on those nodes.
Settings.Builder settings = Settings.builder()
.put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), between(5, 10))
.put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
.put("index.routing.allocation.include._name", bwcNames);
final String index = "test-snapshot-index";
createIndex(index, settings.build());
indexDocs(index, 0, between(50, 100));
ensureGreen();
assertOK(client().performRequest("POST", index + "/_refresh"));
assertOK(
client().performRequest("PUT", "/_snapshot/repo/bwc-snapshot", singletonMap("wait_for_completion", "true"),
new StringEntity("{\"indices\": \"" + index + "\"}", ContentType.APPLICATION_JSON))
);
// Allocating shards on all nodes so that the snapshot is taken on all nodes.
updateIndexSetting(index, Settings.builder().putNull("index.routing.allocation.include._name"));
ensureGreen();
assertOK(client().performRequest("POST", index + "/_refresh"));
assertOK(
client().performRequest("PUT", "/_snapshot/repo/mixed-snapshot", singletonMap("wait_for_completion", "true"),
new StringEntity("{\"indices\": \"" + index + "\"}", ContentType.APPLICATION_JSON))
);
}
private void assertCount(final String index, final String preference, final int expectedCount) throws IOException {
final Response response = client().performRequest("GET", index + "/_count", Collections.singletonMap("preference", preference));
assertOK(response);


@ -0,0 +1,26 @@
---
"Deprecated parameters should fail in Bulk query":
- skip:
version: " - 6.99.99"
reason: some parameters are removed starting from 7.0, their equivalents without underscore are used instead
features: "warnings"
- do:
catch: bad_request
bulk:
body: |
{ "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_1", "_version": 1 } }
{ "doc": { "f1": "v1" } }
{ "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_2", "_version": 1 } }
{ "doc": { "f1": "v2" } }
- do:
catch: bad_request
bulk:
body: |
{ "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_1", "_routing": "test1" } }
{ "doc": { "f1": "v1" } }
{ "update": { "_index": "test_index", "_type": "test_type", "_id": "test_id_2", "_routing": "test1" } }
{ "doc": { "f1": "v2" } }


@ -0,0 +1,38 @@
---
"Deprecated parameters should fail in Multi Get query":
- skip:
version: " - 6.99.99"
reason: _version, _routing are removed starting from 7.0, their equivalents without underscore are used instead
features: "warnings"
- do:
index:
index: test_1
type: test
id: 1
body: { foo: bar }
- do:
index:
index: test_1
type: test
id: 2
body: { foo: baz }
- do:
catch: bad_request
mget:
body:
docs:
- { _index: test_1, _type: test, _id: 1, _routing : test1 }
- { _index: test_1, _type: test, _id: 2, _routing : test1 }
- do:
catch: bad_request
mget:
body:
docs:
- { _index: test_1, _type: test, _id: 1, _version : 1 }
- { _index: test_1, _type: test, _id: 2, _version : 1 }


@ -0,0 +1,52 @@
---
"Deprecated camel case and _ parameters should fail in Term Vectors query":
- skip:
version: " - 6.99.99"
reason: camel case and _ parameters (e.g. versionType, _version_type) should fail from 7.0
features: "warnings"
- do:
indices.create:
index: testidx
body:
mappings:
testtype:
properties:
text:
type : "text"
term_vector : "with_positions_offsets"
- do:
index:
index: testidx
type: testtype
id: testing_document
body: {"text" : "The quick brown fox is brown."}
- do:
catch: bad_request
mtermvectors:
"term_statistics" : true
"body" :
"docs":
-
"_index" : "testidx"
"_type" : "testtype"
"_id" : "testing_document"
"version" : 1
"versionType" : "external"
- do:
catch: bad_request
mtermvectors:
"term_statistics" : true
"body" :
"docs":
-
"_index" : "testidx"
"_type" : "testtype"
"_id" : "testing_document"
"version" : 1
"_version_type" : "external"


@ -41,3 +41,60 @@ setup:
- match: { hits.hits.0.inner_hits.nested_field.hits.hits.0._nested.offset: 0 }
- is_false: hits.hits.0.inner_hits.nested_field.hits.hits.0._nested.child
---
"Nested doc version and seqIDs":
- skip:
# fixed in 6.0.1
version: " - 6.0.0"
reason: "version and seq IDs where not accurate in previous versions"
- do:
index:
index: test
type: type_1
id: 1
body:
"nested_field" : [ { "foo": "bar" } ]
- do:
indices.refresh: {}
- do:
search:
body: { "query" : { "nested" : { "path" : "nested_field", "query" : { "match_all" : {} }, "inner_hits" : { version: true, "docvalue_fields": ["_seq_no"]} }}, "version": true, "docvalue_fields" : ["_seq_no"] }
- match: { hits.total: 1 }
- match: { hits.hits.0._index: "test" }
- match: { hits.hits.0._type: "type_1" }
- match: { hits.hits.0._id: "1" }
- match: { hits.hits.0._version: 1 }
- match: { hits.hits.0.fields._seq_no: [0] }
- match: { hits.hits.0.inner_hits.nested_field.hits.hits.0.fields._seq_no: [0] }
- do:
index:
index: test
type: type_1
id: 1
body:
"nested_field" : [ { "foo": "baz" } ]
- do:
indices.refresh: {}
- do:
search:
body: { "query" : { "nested" : { "path" : "nested_field", "query" : { "match_all" : {} }, "inner_hits" : { version: true, "docvalue_fields": ["_seq_no"]} }}, "version": true, "docvalue_fields" : ["_seq_no"] }
- match: { hits.total: 1 }
- match: { hits.hits.0._index: "test" }
- match: { hits.hits.0._type: "type_1" }
- match: { hits.hits.0._id: "1" }
- match: { hits.hits.0._version: 2 }
- match: { hits.hits.0.fields._seq_no: [1] }
- match: { hits.hits.0.inner_hits.nested_field.hits.hits.0._version: 2 }
- match: { hits.hits.0.inner_hits.nested_field.hits.hits.0.fields._seq_no: [1] }


@ -5,6 +5,7 @@ List projects = [
'build-tools',
'rest-api-spec',
'core',
'core:cli',
'docs',
'client:rest',
'client:rest-high-level',


@ -22,6 +22,7 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks;
dependencies {
compile "org.elasticsearch.client:elasticsearch-rest-client:${version}"
compile "org.elasticsearch:elasticsearch:${version}"
compile "org.elasticsearch:elasticsearch-cli:${version}"
compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
compile "junit:junit:${versions.junit}"
compile "org.hamcrest:hamcrest-all:${versions.hamcrest}"


@ -51,7 +51,7 @@ abstract class ESElasticsearchCliTestCase extends ESTestCase {
final AtomicBoolean init = new AtomicBoolean();
final int status = Elasticsearch.main(args, new Elasticsearch() {
@Override
protected Environment createEnv(final Terminal terminal, final Map<String, String> settings) throws UserException {
protected Environment createEnv(final Map<String, String> settings) throws UserException {
Settings.Builder builder = Settings.builder().put("path.home", home);
settings.forEach((k,v) -> builder.put(k, v));
final Settings realSettings = builder.build();

View File

@ -66,7 +66,7 @@ public class MockNode extends Node {
}
public MockNode(Settings settings, Collection<Class<? extends Plugin>> classpathPlugins, Path configPath) {
this(InternalSettingsPreparer.prepareEnvironment(settings, null, Collections.emptyMap(), configPath), classpathPlugins);
this(InternalSettingsPreparer.prepareEnvironment(settings, Collections.emptyMap(), configPath), classpathPlugins);
}
public MockNode(Environment environment, Collection<Class<? extends Plugin>> classpathPlugins) {

View File

@ -1026,7 +1026,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
ServiceHolder(Settings nodeSettings, Settings indexSettings,
Collection<Class<? extends Plugin>> plugins, AbstractQueryTestCase<?> testCase) throws IOException {
Environment env = InternalSettingsPreparer.prepareEnvironment(nodeSettings, null);
Environment env = InternalSettingsPreparer.prepareEnvironment(nodeSettings);
PluginsService pluginsService;
pluginsService = new PluginsService(nodeSettings, null, env.modulesFile(), env.pluginsFile(), plugins);

View File

@ -23,10 +23,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.Tuple;
import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Optional;
@ -49,72 +46,64 @@ public class VersionUtils {
* guarantees in v1 and versions without the guarantees in v2
*/
static Tuple<List<Version>, List<Version>> resolveReleasedVersions(Version current, Class<?> versionClass) {
Field[] fields = versionClass.getFields();
List<Version> versions = new ArrayList<>(fields.length);
for (final Field field : fields) {
final int mod = field.getModifiers();
if (false == Modifier.isStatic(mod) && Modifier.isFinal(mod) && Modifier.isPublic(mod)) {
continue;
}
if (field.getType() != Version.class) {
continue;
}
if ("CURRENT".equals(field.getName())) {
continue;
}
assert field.getName().matches("V(_\\d+)+(_(alpha|beta|rc)\\d+)?") : field.getName();
try {
versions.add(((Version) field.get(null)));
} catch (final IllegalAccessException e) {
throw new RuntimeException(e);
}
}
Collections.sort(versions);
List<Version> versions = Version.getDeclaredVersions(versionClass);
Version last = versions.remove(versions.size() - 1);
assert last.equals(current) : "The highest version must be the current one "
+ "but was [" + versions.get(versions.size() - 1) + "] and current was [" + current + "]";
+ "but was [" + last + "] and current was [" + current + "]";
if (current.revision != 0) {
/* If we are in a stable branch there should be no unreleased version constants
* because we don't expect to release any new versions in older branches. If there
* are extra constants then gradle will yell about it. */
/* In the 5.x series prior to 5.6, unreleased version constants had an
* `_UNRELEASED` suffix, and when making the first release on a minor release
* branch the last, unreleased, version constant from the previous minor branch
* was dropped. After 5.6, there is no `_UNRELEASED` suffix on version constants'
* names and, additionally, they are not dropped when a new minor release branch
* starts.
*
* This means that in the 6.x and later series the last version _in each
* minor branch_ is unreleased, whereas in 5.x it's more complicated: There were
* (sometimes, and sometimes multiple) minor branches containing no releases, each
* of which contains a single version constant of the form 5.n.0, and these
* branches always followed a branch that _did_ contain a version of the
* form 5.m.p (p>0). All versions strictly before the last 5.m version are released,
* and all other 5.* versions are unreleased.
*/
if (current.major == 5 && current.revision != 0) {
/* The current (i.e. latest) version is 5.a.b, b nonzero, which
* means that all other versions are released. */
return new Tuple<>(unmodifiableList(versions), singletonList(current));
}
/* If we are on a patch release then we know that at least the version before the
* current one is unreleased. If it is released then gradle would be complaining. */
int unreleasedIndex = versions.size() - 1;
while (true) {
if (unreleasedIndex < 0) {
throw new IllegalArgumentException("Couldn't find first non-alpha release");
final List<Version> unreleased = new ArrayList<>();
unreleased.add(current);
Version prevConsideredVersion = current;
for (int i = versions.size() - 1; i >= 0; i--) {
Version currConsideredVersion = versions.get(i);
if (currConsideredVersion.major == 5) {
unreleased.add(currConsideredVersion);
versions.remove(i);
if (currConsideredVersion.revision != 0) {
/* Currently considering the latest version in the 5.x series,
* which is (a) unreleased and (b) the only such. So we're done. */
break;
}
/* ... else we're on a version of the form 5.n.0, and have not yet
* considered a version of the form 5.n.m (m>0), so this entire branch
* is unreleased, so carry on looking for a branch containing releases.
*/
} else if (currConsideredVersion.major != prevConsideredVersion.major
|| currConsideredVersion.minor != prevConsideredVersion.minor) {
/* Have moved to the end of a new minor branch, so this is
* an unreleased version. */
unreleased.add(currConsideredVersion);
versions.remove(i);
}
/* We don't support backwards compatibility for alphas, betas, and rcs. But
* they were released so we add them to the released list. Usually this doesn't
* matter to consumers, but consumers that do care should filter non-release
* versions. */
if (versions.get(unreleasedIndex).isRelease()) {
break;
}
unreleasedIndex--;
prevConsideredVersion = currConsideredVersion;
}
Version unreleased = versions.remove(unreleasedIndex);
if (unreleased.revision == 0) {
/*
* If the last unreleased version is itself a patch release then Gradle enforces that there is yet another unreleased version
* before that. However, we have to skip alpha/betas/RCs too (e.g., consider when the version constants are ..., 5.6.3, 5.6.4,
* 6.0.0-alpha1, ..., 6.0.0-rc1, 6.0.0-rc2, 6.0.0, 6.1.0 on the 6.x branch. In this case, we will have pruned 6.0.0 and 6.1.0 as
* unreleased versions, but we also need to prune 5.6.4. At this point though, unreleasedIndex will be pointing to 6.0.0-rc2, so
* we have to skip backwards until we find a non-alpha/beta/RC again. Then we can prune that version as an unreleased version
* too.
*/
do {
unreleasedIndex--;
} while (versions.get(unreleasedIndex).isRelease() == false);
Version earlierUnreleased = versions.remove(unreleasedIndex);
return new Tuple<>(unmodifiableList(versions), unmodifiableList(Arrays.asList(earlierUnreleased, unreleased, current)));
}
return new Tuple<>(unmodifiableList(versions), unmodifiableList(Arrays.asList(unreleased, current)));
Collections.reverse(unreleased);
return new Tuple<>(unmodifiableList(versions), unmodifiableList(unreleased));
}
private static final List<Version> RELEASED_VERSIONS;
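The rewritten resolveReleasedVersions above replaces the reflection-and-index bookkeeping with a single backwards walk over the sorted version constants. The following is a minimal sketch of that selection rule only, using a simplified stand-in for org.elasticsearch.Version; the class V, the helper UnreleasedSketch, and its method names are assumptions for illustration, not the real types.
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

// Simplified stand-in for org.elasticsearch.Version; illustration only.
final class V {
    final int major, minor, revision;
    V(int major, int minor, int revision) { this.major = major; this.minor = minor; this.revision = revision; }
}

final class UnreleasedSketch {
    // Walks the constants (sorted ascending, current already removed) from newest to oldest:
    // the newest constant of every minor branch is treated as unreleased, and in the 5.x
    // series a trailing run of 5.n.0 constants is unreleased until the first branch
    // containing a 5.m.p (p > 0) release is reached.
    static List<V> unreleased(List<V> sortedWithoutCurrent, V current) {
        if (current.major == 5 && current.revision != 0) {
            return Collections.singletonList(current); // latest is 5.a.b (b > 0): everything else is released
        }
        List<V> unreleased = new ArrayList<>();
        unreleased.add(current);
        V prev = current;
        for (int i = sortedWithoutCurrent.size() - 1; i >= 0; i--) {
            V v = sortedWithoutCurrent.get(i);
            if (v.major == 5) {
                unreleased.add(v);
                if (v.revision != 0) {
                    break; // reached the newest 5.x branch that has releases; done
                }
            } else if (v.major != prev.major || v.minor != prev.minor) {
                unreleased.add(v); // newest constant of a minor branch not seen yet
            }
            prev = v;
        }
        Collections.reverse(unreleased);
        return unreleased;
    }
}
For example, with constants 5.6.3, 5.6.4, 6.0.0, 6.1.0 and current 6.1.0, this rule picks 5.6.4, 6.0.0, and 6.1.0 as unreleased, mirroring the comments in the diff above.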

View File

@ -165,7 +165,7 @@ public class ClientYamlTestClient {
Header[] requestHeaders = new Header[headers.size()];
int index = 0;
for (Map.Entry<String, String> header : headers.entrySet()) {
logger.info("Adding header {}\n with value {}", header.getKey(), header.getValue());
logger.info("Adding header {} with value {}", header.getKey(), header.getValue());
requestHeaders[index++] = new BasicHeader(header.getKey(), header.getValue());
}

View File

@ -1976,7 +1976,7 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
MockTcpTransport transport = new MockTcpTransport(Settings.EMPTY, threadPool, BigArrays.NON_RECYCLING_INSTANCE,
new NoneCircuitBreakerService(), namedWriteableRegistry, new NetworkService(Collections.emptyList())) {
@Override
protected String handleRequest(MockChannel mockChannel, String profileName, StreamInput stream, long requestId,
protected String handleRequest(TcpChannel mockChannel, String profileName, StreamInput stream, long requestId,
int messageLengthBytes, Version version, InetSocketAddress remoteAddress, byte status)
throws IOException {
return super.handleRequest(mockChannel, profileName, stream, requestId, messageLengthBytes, version, remoteAddress,

View File

@ -68,7 +68,7 @@ import java.util.function.Consumer;
* that need real networking. This implementation is a test only implementation that implements
* the networking layer in the worst possible way since it blocks and uses a thread per request model.
*/
public class MockTcpTransport extends TcpTransport<MockTcpTransport.MockChannel> {
public class MockTcpTransport extends TcpTransport {
/**
* A pre-built light connection profile that shares a single connection across all
@ -109,11 +109,6 @@ public class MockTcpTransport extends TcpTransport<MockTcpTransport.MockChannel>
this.mockVersion = mockVersion;
}
@Override
protected InetSocketAddress getLocalAddress(MockChannel mockChannel) {
return mockChannel.localAddress;
}
@Override
protected MockChannel bind(final String name, InetSocketAddress address) throws IOException {
MockServerSocket socket = new MockServerSocket();
@ -176,7 +171,7 @@ public class MockTcpTransport extends TcpTransport<MockTcpTransport.MockChannel>
}
@Override
protected MockChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener<MockChannel> connectListener)
protected MockChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener<Void> connectListener)
throws IOException {
InetSocketAddress address = node.getAddress().address();
final MockSocket socket = new MockSocket();
@ -191,7 +186,7 @@ public class MockTcpTransport extends TcpTransport<MockTcpTransport.MockChannel>
MockChannel channel = new MockChannel(socket, address, "none", (c) -> {});
channel.loopRead(executor);
success = true;
connectListener.onResponse(channel);
connectListener.onResponse(null);
return channel;
} finally {
if (success == false) {
@ -222,22 +217,6 @@ public class MockTcpTransport extends TcpTransport<MockTcpTransport.MockChannel>
socket.setReuseAddress(TCP_REUSE_ADDRESS.get(settings));
}
@Override
protected void sendMessage(MockChannel mockChannel, BytesReference reference, ActionListener<MockChannel> listener) {
try {
synchronized (mockChannel) {
final Socket socket = mockChannel.activeChannel;
OutputStream outputStream = new BufferedOutputStream(socket.getOutputStream());
reference.writeTo(outputStream);
outputStream.flush();
}
listener.onResponse(mockChannel);
} catch (IOException e) {
listener.onFailure(e);
onException(mockChannel, e);
}
}
@Override
public long getNumOpenServerConnections() {
return 1;
@ -252,7 +231,7 @@ public class MockTcpTransport extends TcpTransport<MockTcpTransport.MockChannel>
private final String profile;
private final CancellableThreads cancellableThreads = new CancellableThreads();
private final Closeable onClose;
private final CompletableFuture<TcpChannel> closeFuture = new CompletableFuture<>();
private final CompletableFuture<Void> closeFuture = new CompletableFuture<>();
/**
* Constructs a new MockChannel instance intended for handling the actual incoming / outgoing traffic.
@ -377,14 +356,14 @@ public class MockTcpTransport extends TcpTransport<MockTcpTransport.MockChannel>
public void close() {
try {
close0();
closeFuture.complete(this);
closeFuture.complete(null);
} catch (IOException e) {
closeFuture.completeExceptionally(e);
}
}
@Override
public void addCloseListener(ActionListener<TcpChannel> listener) {
public void addCloseListener(ActionListener<Void> listener) {
closeFuture.whenComplete(ActionListener.toBiConsumer(listener));
}
@ -401,6 +380,25 @@ public class MockTcpTransport extends TcpTransport<MockTcpTransport.MockChannel>
return isOpen.get();
}
@Override
public InetSocketAddress getLocalAddress() {
return localAddress;
}
@Override
public void sendMessage(BytesReference reference, ActionListener<Void> listener) {
try {
synchronized (this) {
OutputStream outputStream = new BufferedOutputStream(activeChannel.getOutputStream());
reference.writeTo(outputStream);
outputStream.flush();
}
listener.onResponse(null);
} catch (IOException e) {
listener.onFailure(e);
onException(this, e);
}
}
}
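The MockChannel changes above follow the void-listener pattern introduced on master: callers registering for close or send completion no longer get the channel back, only success or failure. Below is a stripped-down sketch of that pattern using only java.util.concurrent types; VoidCloseSketch is a hypothetical name, and the real code adapts ActionListener via ActionListener.toBiConsumer as shown in the diff.
import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

final class VoidCloseSketch {
    private final CompletableFuture<Void> closeFuture = new CompletableFuture<>();

    // Listeners learn only that the close completed (value is always null) or failed.
    void addCloseListener(BiConsumer<Void, Throwable> listener) {
        closeFuture.whenComplete(listener);
    }

    void close() {
        try {
            // ... release the underlying socket / selector key here ...
            closeFuture.complete(null);            // success carries no payload
        } catch (RuntimeException e) {
            closeFuture.completeExceptionally(e);  // failure carries the exception
        }
    }
}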

View File

@ -22,13 +22,9 @@ package org.elasticsearch.transport.nio;
import org.elasticsearch.transport.nio.channel.NioServerSocketChannel;
import java.io.IOException;
import java.nio.channels.CancelledKeyException;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.ClosedSelectorException;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.util.Iterator;
import java.util.Set;
import java.util.concurrent.ConcurrentLinkedQueue;
/**
@ -93,7 +89,6 @@ public class AcceptingSelector extends ESSelector {
newChannel.register();
SelectionKey selectionKey = newChannel.getSelectionKey();
selectionKey.attach(newChannel);
addRegisteredChannel(newChannel);
eventHandler.serverChannelRegistered(newChannel);
} else {
eventHandler.registrationException(newChannel, new ClosedChannelException());

View File

@ -28,14 +28,13 @@ import java.nio.channels.CancelledKeyException;
import java.nio.channels.ClosedSelectorException;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.util.Collections;
import java.util.Iterator;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;
/**
* This is a basic selector abstraction used by {@link org.elasticsearch.transport.nio.NioTransport}. This
@ -56,7 +55,6 @@ public abstract class ESSelector implements Closeable {
private final CountDownLatch exitedLoop = new CountDownLatch(1);
private final AtomicBoolean isClosed = new AtomicBoolean(false);
private final PlainActionFuture<Boolean> isRunningFuture = PlainActionFuture.newFuture();
private final Set<NioChannel> registeredChannels = Collections.newSetFromMap(new ConcurrentHashMap<NioChannel, Boolean>());
private volatile Thread thread;
ESSelector(EventHandler eventHandler) throws IOException {
@ -134,7 +132,7 @@ public abstract class ESSelector implements Closeable {
void cleanupAndCloseChannels() {
cleanup();
channelsToClose.addAll(registeredChannels);
channelsToClose.addAll(selector.keys().stream().map(sk -> (NioChannel) sk.attachment()).collect(Collectors.toList()));
closePendingChannels();
}
@ -171,19 +169,6 @@ public abstract class ESSelector implements Closeable {
selector.wakeup();
}
public Set<NioChannel> getRegisteredChannels() {
return registeredChannels;
}
public void addRegisteredChannel(NioChannel channel) {
assert registeredChannels.contains(channel) == false : "Should only register channel once";
registeredChannels.add(channel);
}
public void removeRegisteredChannel(NioChannel channel) {
registeredChannels.remove(channel);
}
@Override
public void close() throws IOException {
if (isClosed.compareAndSet(false, true)) {
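Dropping the registeredChannels set above leans on the fact that java.nio already tracks every registration: each SelectionKey carries the channel as its attachment, so cleanupAndCloseChannels can derive the channel set from selector.keys(). A minimal illustration using only JDK types; KeyAttachmentSketch is a hypothetical helper, not part of the change.
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.util.List;
import java.util.stream.Collectors;

final class KeyAttachmentSketch {
    // Recover every object attached at registration time; no side-car Set required.
    static List<Object> registeredAttachments(Selector selector) {
        return selector.keys().stream()
                .map(SelectionKey::attachment)
                .collect(Collectors.toList());
    }
}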

View File

@ -1,62 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.transport.nio;
import org.elasticsearch.transport.nio.channel.ChannelFactory;
import org.elasticsearch.transport.nio.channel.NioSocketChannel;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.concurrent.Semaphore;
import java.util.function.Supplier;
public class NioClient {
private final OpenChannels openChannels;
private final Supplier<SocketSelector> selectorSupplier;
private final ChannelFactory channelFactory;
private final Semaphore semaphore = new Semaphore(Integer.MAX_VALUE);
NioClient(OpenChannels openChannels, Supplier<SocketSelector> selectorSupplier, ChannelFactory channelFactory) {
this.openChannels = openChannels;
this.selectorSupplier = selectorSupplier;
this.channelFactory = channelFactory;
}
public void close() {
semaphore.acquireUninterruptibly(Integer.MAX_VALUE);
}
NioSocketChannel initiateConnection(InetSocketAddress address) throws IOException {
boolean allowedToConnect = semaphore.tryAcquire();
if (allowedToConnect == false) {
return null;
}
try {
SocketSelector selector = selectorSupplier.get();
NioSocketChannel nioSocketChannel = channelFactory.openNioChannel(address, selector);
openChannels.clientChannelOpened(nioSocketChannel);
return nioSocketChannel;
} finally {
semaphore.release();
}
}
}

View File

@ -34,13 +34,7 @@ public class NioShutdown {
this.logger = logger;
}
void orderlyShutdown(OpenChannels openChannels, NioClient client, ArrayList<AcceptingSelector> acceptors,
ArrayList<SocketSelector> socketSelectors) {
// Close the client. This ensures that no new send connections will be opened. Client could be null if exception was
// thrown on start up
if (client != null) {
client.close();
}
void orderlyShutdown(OpenChannels openChannels, ArrayList<AcceptingSelector> acceptors, ArrayList<SocketSelector> socketSelectors) {
// Start by closing the server channels. Once these are closed, we are guaranteed to not accept new connections
openChannels.closeServerChannels();

View File

@ -20,10 +20,8 @@
package org.elasticsearch.transport.nio;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
@ -54,7 +52,7 @@ import static org.elasticsearch.common.settings.Setting.intSetting;
import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap;
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
public class NioTransport extends TcpTransport<NioChannel> {
public class NioTransport extends TcpTransport {
public static final String TRANSPORT_WORKER_THREAD_NAME_PREFIX = Transports.NIO_TRANSPORT_WORKER_THREAD_NAME_PREFIX;
public static final String TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX = Transports.NIO_TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX;
@ -72,7 +70,8 @@ public class NioTransport extends TcpTransport<NioChannel> {
private final ConcurrentMap<String, ChannelFactory> profileToChannelFactory = newConcurrentMap();
private final ArrayList<AcceptingSelector> acceptors = new ArrayList<>();
private final ArrayList<SocketSelector> socketSelectors = new ArrayList<>();
private NioClient client;
private RoundRobinSelectorSupplier clientSelectorSupplier;
private ChannelFactory clientChannelFactory;
private int acceptorNumber;
public NioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays,
@ -86,11 +85,6 @@ public class NioTransport extends TcpTransport<NioChannel> {
return openChannels.serverChannelsCount();
}
@Override
protected InetSocketAddress getLocalAddress(NioChannel channel) {
return channel.getLocalAddress();
}
@Override
protected NioServerSocketChannel bind(String name, InetSocketAddress address) throws IOException {
ChannelFactory channelFactory = this.profileToChannelFactory.get(name);
@ -99,22 +93,10 @@ public class NioTransport extends TcpTransport<NioChannel> {
}
@Override
protected void sendMessage(NioChannel channel, BytesReference reference, ActionListener<NioChannel> listener) {
if (channel instanceof NioSocketChannel) {
NioSocketChannel nioSocketChannel = (NioSocketChannel) channel;
nioSocketChannel.getWriteContext().sendMessage(reference, listener);
} else {
logger.error("cannot send message to channel of this type [{}]", channel.getClass());
}
}
@Override
protected NioChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener<NioChannel> connectListener)
protected NioChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener<Void> connectListener)
throws IOException {
NioSocketChannel channel = client.initiateConnection(node.getAddress().address());
if (channel == null) {
throw new ElasticsearchException("client is shutdown");
}
NioSocketChannel channel = clientChannelFactory.openNioChannel(node.getAddress().address(), clientSelectorSupplier.get());
openChannels.clientChannelOpened(channel);
channel.addConnectListener(connectListener);
return channel;
}
@ -137,7 +119,8 @@ public class NioTransport extends TcpTransport<NioChannel> {
}
}
client = createClient();
clientSelectorSupplier = new RoundRobinSelectorSupplier(socketSelectors);
clientChannelFactory = new ChannelFactory(new ProfileSettings(settings, "default"), contextSetter);
if (NetworkService.NETWORK_SERVER.get(settings)) {
int acceptorCount = NioTransport.NIO_ACCEPTOR_COUNT.get(settings);
@ -178,7 +161,7 @@ public class NioTransport extends TcpTransport<NioChannel> {
@Override
protected void stopInternal() {
NioShutdown nioShutdown = new NioShutdown(logger);
nioShutdown.orderlyShutdown(openChannels, client, acceptors, socketSelectors);
nioShutdown.orderlyShutdown(openChannels, acceptors, socketSelectors);
profileToChannelFactory.clear();
socketSelectors.clear();
@ -188,15 +171,7 @@ public class NioTransport extends TcpTransport<NioChannel> {
return new SocketEventHandler(logger, this::exceptionCaught, openChannels);
}
final void exceptionCaught(NioSocketChannel channel, Throwable cause) {
final Throwable unwrapped = ExceptionsHelper.unwrap(cause, ElasticsearchException.class);
final Throwable t = unwrapped != null ? unwrapped : cause;
onException(channel, t instanceof Exception ? (Exception) t : new ElasticsearchException(t));
}
private NioClient createClient() {
Supplier<SocketSelector> selectorSupplier = new RoundRobinSelectorSupplier(socketSelectors);
ChannelFactory channelFactory = new ChannelFactory(new ProfileSettings(settings, "default"), contextSetter);
return new NioClient(openChannels, selectorSupplier, channelFactory);
final void exceptionCaught(NioSocketChannel channel, Exception exception) {
onException(channel, exception);
}
}
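With NioClient deleted, initiateChannel above draws a selector from a RoundRobinSelectorSupplier and opens the channel directly through the client ChannelFactory. The supplier's implementation is not part of this diff; the sketch below is a plausible round-robin supplier over a fixed list, with RoundRobinSupplierSketch and its details being assumptions for illustration only.
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Supplier;

// Hypothetical illustration of a round-robin supplier over a fixed list of selectors.
final class RoundRobinSupplierSketch<T> implements Supplier<T> {
    private final List<T> items;
    private final AtomicInteger counter = new AtomicInteger();

    RoundRobinSupplierSketch(List<T> items) {
        this.items = items;
    }

    @Override
    public T get() {
        // floorMod keeps the index non-negative even after the counter overflows
        return items.get(Math.floorMod(counter.getAndIncrement(), items.size()));
    }
}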

View File

@ -34,10 +34,10 @@ import java.util.function.BiConsumer;
*/
public class SocketEventHandler extends EventHandler {
private final BiConsumer<NioSocketChannel, Throwable> exceptionHandler;
private final BiConsumer<NioSocketChannel, Exception> exceptionHandler;
private final Logger logger;
public SocketEventHandler(Logger logger, BiConsumer<NioSocketChannel, Throwable> exceptionHandler, OpenChannels openChannels) {
public SocketEventHandler(Logger logger, BiConsumer<NioSocketChannel, Exception> exceptionHandler, OpenChannels openChannels) {
super(logger, openChannels);
this.exceptionHandler = exceptionHandler;
this.logger = logger;

View File

@ -171,7 +171,6 @@ public class SocketSelector extends ESSelector {
try {
if (newChannel.isOpen()) {
newChannel.register();
addRegisteredChannel(newChannel);
SelectionKey key = newChannel.getSelectionKey();
key.attach(newChannel);
eventHandler.handleRegistration(newChannel);

View File

@ -32,10 +32,9 @@ public class TcpReadHandler {
this.transport = transport;
}
public void handleMessage(BytesReference reference, NioSocketChannel channel, String profileName,
int messageBytesLength) {
public void handleMessage(BytesReference reference, NioSocketChannel channel, int messageBytesLength) {
try {
transport.messageReceived(reference, channel, profileName, channel.getRemoteAddress(), messageBytesLength);
transport.messageReceived(reference, channel, channel.getProfile(), channel.getRemoteAddress(), messageBytesLength);
} catch (IOException e) {
handleException(channel, e);
}

View File

@ -24,7 +24,6 @@ import org.apache.lucene.util.BytesRefIterator;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.transport.nio.channel.NioChannel;
import org.elasticsearch.transport.nio.channel.NioSocketChannel;
import java.io.IOException;
@ -33,10 +32,10 @@ import java.util.ArrayList;
public class WriteOperation {
private final NioSocketChannel channel;
private final ActionListener<NioChannel> listener;
private final ActionListener<Void> listener;
private final NetworkBytesReference[] references;
public WriteOperation(NioSocketChannel channel, BytesReference bytesReference, ActionListener<NioChannel> listener) {
public WriteOperation(NioSocketChannel channel, BytesReference bytesReference, ActionListener<Void> listener) {
this.channel = channel;
this.listener = listener;
this.references = toArray(bytesReference);
@ -46,7 +45,7 @@ public class WriteOperation {
return references;
}
public ActionListener<NioChannel> getListener() {
public ActionListener<Void> getListener() {
return listener;
}

Some files were not shown because too many files have changed in this diff