Mirror of https://github.com/honeymoose/OpenSearch.git (synced 2025-03-09 14:34:43 +00:00)

Merge branch 'master' into feature/rank-eval

This commit is contained in commit 18155ed69a.

.github/ISSUE_TEMPLATE.md (vendored): 2 changed lines
@@ -22,7 +22,7 @@ Issues that do not follow these guidelines are likely to be closed.

 <!-- Bug report -->

-**Elasticsearch version**:
+**Elasticsearch version** (`bin/elasticsearch --version`):

 **Plugins installed**: []

Vagrantfile (vendored): 2 changed lines
@@ -272,7 +272,7 @@ def provision(config,
     installed gradle || {
       echo "==> Installing Gradle"
      curl -sS -o /tmp/gradle.zip -L https://services.gradle.org/distributions/gradle-3.3-bin.zip
-      unzip /tmp/gradle.zip -d /opt
+      unzip -q /tmp/gradle.zip -d /opt
      rm -rf /tmp/gradle.zip
      ln -s /opt/gradle-3.3/bin/gradle /usr/bin/gradle
      # make nfs mounted gradle home dir writeable
build.gradle: 24 changed lines
@@ -79,18 +79,20 @@ int lastPrevMinor = -1 // the minor version number from the prev major we most r
 for (String line : versionLines) {
   /* Note that this skips alphas and betas which is fine because they aren't
    * compatible with anything. */
-  Matcher match = line =~ /\W+public static final Version V_(\d+)_(\d+)_(\d+) .*/
+  Matcher match = line =~ /\W+public static final Version V_(\d+)_(\d+)_(\d+)(_beta\d+|_rc\d+)? .*/
   if (match.matches()) {
     int major = Integer.parseInt(match.group(1))
     int minor = Integer.parseInt(match.group(2))
     int bugfix = Integer.parseInt(match.group(3))
     Version foundVersion = new Version(major, minor, bugfix, false)
-    if (currentVersion != foundVersion) {
+    if (currentVersion != foundVersion
+        && (major == prevMajor || major == currentVersion.major)
+        && (versions.isEmpty() || versions.last() != foundVersion)) {
       versions.add(foundVersion)
-    }
-    if (major == prevMajor && minor > lastPrevMinor) {
-      prevMinorIndex = versions.size() - 1
-      lastPrevMinor = minor
+      if (major == prevMajor && minor > lastPrevMinor) {
+        prevMinorIndex = versions.size() - 1
+        lastPrevMinor = minor
+      }
     }
   }
 }
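The widened regex above now also captures _betaN and _rcN constants. A minimal, self-contained Java sketch of what the new pattern accepts; the sample lines are hypothetical stand-ins for the constants in Version.java:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class VersionLineMatcherDemo {
    // Same pattern as the new build.gradle regex, with an optional _betaN/_rcN group.
    private static final Pattern VERSION_LINE =
            Pattern.compile("\\W+public static final Version V_(\\d+)_(\\d+)_(\\d+)(_beta\\d+|_rc\\d+)? .*");

    public static void main(String[] args) {
        String[] samples = {
                "    public static final Version V_5_6_0 = new Version(5060099);",       // matched before and after
                "    public static final Version V_6_0_0_beta1 = new Version(6000026);", // only matched by the new pattern
                "    public static final Version V_6_0_0_rc1 = new Version(6000051);",   // only matched by the new pattern
        };
        for (String line : samples) {
            Matcher m = VERSION_LINE.matcher(line);
            if (m.matches()) {
                System.out.println(m.group(1) + "." + m.group(2) + "." + m.group(3)
                        + (m.group(4) == null ? "" : " (prerelease" + m.group(4) + ")"));
            }
        }
    }
}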
@@ -242,9 +244,11 @@ subprojects {
     ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-stable-snapshot'
     ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-stable-snapshot'
     ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-stable-snapshot'
-    ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
-    ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
-    ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
+    if (indexCompatVersions.size() > 1) {
+      ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
+      ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
+      ext.projectSubstitutions["org.elasticsearch.distribution.zip:elasticsearch:${indexCompatVersions[-2]}"] = ':distribution:bwc-release-snapshot'
+    }
   } else {
     ext.projectSubstitutions["org.elasticsearch.distribution.deb:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-release-snapshot'
     ext.projectSubstitutions["org.elasticsearch.distribution.rpm:elasticsearch:${indexCompatVersions[-1]}"] = ':distribution:bwc-release-snapshot'
@@ -268,7 +272,7 @@ subprojects {
   // the dependency is added.
   gradle.projectsEvaluated {
     allprojects {
-      if (project.path == ':test:framework') {
+      if (project.path == ':test:framework' || project.path == ':client:test') {
         // :test:framework:test cannot run before and after :core:test
         return
       }
@@ -30,6 +30,7 @@ import org.gradle.api.Project
 import org.gradle.api.Task
 import org.gradle.api.XmlProvider
 import org.gradle.api.artifacts.Configuration
+import org.gradle.api.artifacts.Dependency
 import org.gradle.api.artifacts.ModuleDependency
 import org.gradle.api.artifacts.ModuleVersionIdentifier
 import org.gradle.api.artifacts.ProjectDependency
@@ -269,8 +270,8 @@ class BuildPlugin implements Plugin<Project> {
     })

     // force all dependencies added directly to compile/testCompile to be non-transitive, except for ES itself
-    Closure disableTransitiveDeps = { ModuleDependency dep ->
-      if (!(dep instanceof ProjectDependency) && dep.group.startsWith('org.elasticsearch') == false) {
+    Closure disableTransitiveDeps = { Dependency dep ->
+      if (dep instanceof ModuleDependency && !(dep instanceof ProjectDependency) && dep.group.startsWith('org.elasticsearch') == false) {
         dep.transitive = false

         // also create a configuration just for this dependency version, so that later
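The fix above widens the closure's parameter from ModuleDependency to Dependency and narrows with an instanceof guard, presumably so dependencies that are not module dependencies (for example file collections) pass through without a cast failure. The same shape in plain Java, with hypothetical mini-interfaces standing in for Gradle's types:

import java.util.function.Consumer;

public class DisableTransitiveDemo {
    interface Dependency {}
    interface ModuleDependency extends Dependency { void setTransitive(boolean transitive); }
    interface ProjectDependency extends ModuleDependency {}

    // Mirrors the fixed closure: accept any Dependency, then narrow with instanceof,
    // so non-module dependencies are simply skipped instead of failing a cast.
    // (The real closure also skips anything in the org.elasticsearch group.)
    static final Consumer<Dependency> DISABLE_TRANSITIVE = dep -> {
        if (dep instanceof ModuleDependency && !(dep instanceof ProjectDependency)) {
            ((ModuleDependency) dep).setTransitive(false);
        }
    };
}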
@@ -407,8 +408,9 @@ class BuildPlugin implements Plugin<Project> {
     static void configureCompile(Project project) {
         project.ext.compactProfile = 'compact3'
         project.afterEvaluate {
-            // fail on all javac warnings
             project.tasks.withType(JavaCompile) {
                 File gradleJavaHome = Jvm.current().javaHome
                 // we fork because compiling lots of different classes in a shared jvm can eventually trigger GC overhead limitations
                 options.fork = true
+                options.forkOptions.executable = new File(project.javaHome, 'bin/javac')
+                options.forkOptions.memoryMaximumSize = "1g"

@@ -425,6 +427,7 @@ class BuildPlugin implements Plugin<Project> {
                  * -serial because we don't use java serialization.
                  */
                 // don't even think about passing args with -J-xxx, oracle will ask you to submit a bug report :)
+                // fail on all javac warnings
                 options.compilerArgs << '-Werror' << '-Xlint:all,-path,-serial,-options,-deprecation' << '-Xdoclint:all' << '-Xdoclint:-missing'

                 // either disable annotation processor completely (default) or allow to enable them if an annotation processor is explicitly defined
@@ -439,9 +442,12 @@ class BuildPlugin implements Plugin<Project> {
                 // hack until gradle supports java 9's new "--release" arg
                 assert minimumJava == JavaVersion.VERSION_1_8
                 options.compilerArgs << '--release' << '8'
-                doFirst{
-                    sourceCompatibility = null
-                    targetCompatibility = null
+                if (GradleVersion.current().getBaseVersion() < GradleVersion.version("4.1")) {
+                    // this hack is not needed anymore since Gradle 4.1, see https://github.com/gradle/gradle/pull/2474
+                    doFirst {
+                        sourceCompatibility = null
+                        targetCompatibility = null
+                    }
                 }
             }
         }
@@ -26,6 +26,7 @@ import org.apache.tools.ant.DefaultLogger;
 import org.apache.tools.ant.Project;
 import org.elasticsearch.gradle.AntTask;
 import org.gradle.api.artifacts.Configuration;
+import org.gradle.api.artifacts.FileCollectionDependency;
 import org.gradle.api.file.FileCollection;
 import org.gradle.api.tasks.Input
 import org.gradle.api.tasks.InputFiles

@@ -85,7 +86,11 @@ public class ThirdPartyAuditTask extends AntTask {

         // we only want third party dependencies.
         jars = configuration.fileCollection({ dependency ->
-            dependency.group.startsWith("org.elasticsearch") == false
+            // include SelfResolvingDependency with files in the validation
+            if (dependency instanceof FileCollectionDependency) {
+                return true
+            }
+            return dependency.group && dependency.group.startsWith("org.elasticsearch") == false
         });

         // we don't want provided dependencies, which we have already scanned. e.g. don't
@@ -316,6 +316,8 @@ class ClusterFormationTasks {
         if (Version.fromString(node.nodeVersion).major >= 6) {
             esConfig['cluster.routing.allocation.disk.watermark.flood_stage'] = '1b'
         }
+        // increase script compilation limit since tests can rapid-fire script compilations
+        esConfig['script.max_compilations_per_minute'] = 2048
         esConfig.putAll(node.config.settings)

         Task writeConfig = project.tasks.create(name: name, type: DefaultTask, dependsOn: setup)
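Both values injected above are plain node settings. A hedged sketch of the equivalent configuration built from Java, assuming the standard Settings builder API and 6.x setting names:

import org.elasticsearch.common.settings.Settings;

public class TestClusterSettingsDemo {
    public static void main(String[] args) {
        Settings settings = Settings.builder()
                // a 1-byte flood-stage watermark effectively never trips on tiny CI disks (6.x+ setting)
                .put("cluster.routing.allocation.disk.watermark.flood_stage", "1b")
                // tests can rapid-fire script compilations, so raise the default limit
                .put("script.max_compilations_per_minute", 2048)
                .build();
        System.out.println(settings.get("script.max_compilations_per_minute"));
    }
}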
@@ -134,7 +134,7 @@ class NodeInfo {
             wrapperScript = new File(cwd, "run.bat")
             esScript = new File(homeDir, 'bin/elasticsearch.bat')
         } else {
-            executable = 'sh'
+            executable = 'bash'
             wrapperScript = new File(cwd, "run")
             esScript = new File(homeDir, 'bin/elasticsearch')
         }

@@ -161,8 +161,6 @@ class NodeInfo {
         env.put('CONF_DIR', confDir)
         if (Version.fromString(nodeVersion).major == 5) {
             args.addAll("-E", "path.conf=${confDir}")
-        } else {
-            args.addAll("--path.conf", "${confDir}")
         }
         if (!System.properties.containsKey("tests.es.path.data")) {
             args.addAll("-E", "path.data=${-> dataDir.toString()}")
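As reconstructed above, only 5.x nodes still receive an explicit config-path argument; newer nodes pick the directory up from the CONF_DIR environment variable. A hedged Java sketch of that branch (helper name and structure are illustrative only):

import java.util.ArrayList;
import java.util.List;

public class ConfDirArgsDemo {
    // Hypothetical helper mirroring the branch above: 5.x nodes get the config
    // directory as a -E setting; newer nodes read it from the CONF_DIR env var,
    // so no extra command line argument is appended for them.
    static List<String> confDirArgs(int majorVersion, String confDir) {
        List<String> args = new ArrayList<>();
        if (majorVersion == 5) {
            args.add("-E");
            args.add("path.conf=" + confDir);
        }
        return args;
    }
}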
@@ -400,7 +400,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]AggregationPath.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]dfs[/\\]AggregatedDfs.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]dfs[/\\]DfsSearchResult.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchPhase.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]ShardSearchTransportRequest.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]lookup[/\\]FieldLookup.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]lookup[/\\]LeafDocLookup.java" checks="LineLength" />

@@ -418,7 +417,7 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotShardsService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotsService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]threadpool[/\\]ThreadPool.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]tribe[/\\]TribeService.java" checks="LineLength" />
+  <suppress files="modules[/\\]tribe[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]tribe[/\\]TribeService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]apache[/\\]lucene[/\\]queries[/\\]BlendedTermQueryTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]VersionTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]RejectionActionIT.java" checks="LineLength" />

@@ -545,7 +544,6 @@
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]deps[/\\]joda[/\\]SimpleJodaTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]BlockingClusterStatePublishResponseHandlerTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]discovery[/\\]zen[/\\]ZenDiscoveryUnitTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]document[/\\]DocumentActionsIT.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]env[/\\]EnvironmentTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]env[/\\]NodeEnvironmentTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]explain[/\\]ExplainActionIT.java" checks="LineLength" />
@@ -15,31 +15,31 @@
 # language governing permissions and limitations under the License.

 @defaultMessage Explicitly specify the ContentType of HTTP entities when creating
-org.apache.http.entity.StringEntity#<init>(java.lang.String)
-org.apache.http.entity.StringEntity#<init>(java.lang.String,java.lang.String)
-org.apache.http.entity.StringEntity#<init>(java.lang.String,java.nio.charset.Charset)
-org.apache.http.entity.ByteArrayEntity#<init>(byte[])
-org.apache.http.entity.ByteArrayEntity#<init>(byte[],int,int)
-org.apache.http.entity.FileEntity#<init>(java.io.File)
-org.apache.http.entity.InputStreamEntity#<init>(java.io.InputStream)
-org.apache.http.entity.InputStreamEntity#<init>(java.io.InputStream,long)
-org.apache.http.nio.entity.NByteArrayEntity#<init>(byte[])
-org.apache.http.nio.entity.NByteArrayEntity#<init>(byte[],int,int)
-org.apache.http.nio.entity.NFileEntity#<init>(java.io.File)
-org.apache.http.nio.entity.NStringEntity#<init>(java.lang.String)
-org.apache.http.nio.entity.NStringEntity#<init>(java.lang.String,java.lang.String)
+org.elasticsearch.client.http.entity.StringEntity#<init>(java.lang.String)
+org.elasticsearch.client.http.entity.StringEntity#<init>(java.lang.String,java.lang.String)
+org.elasticsearch.client.http.entity.StringEntity#<init>(java.lang.String,java.nio.charset.Charset)
+org.elasticsearch.client.http.entity.ByteArrayEntity#<init>(byte[])
+org.elasticsearch.client.http.entity.ByteArrayEntity#<init>(byte[],int,int)
+org.elasticsearch.client.http.entity.FileEntity#<init>(java.io.File)
+org.elasticsearch.client.http.entity.InputStreamEntity#<init>(java.io.InputStream)
+org.elasticsearch.client.http.entity.InputStreamEntity#<init>(java.io.InputStream,long)
+org.elasticsearch.client.http.nio.entity.NByteArrayEntity#<init>(byte[])
+org.elasticsearch.client.http.nio.entity.NByteArrayEntity#<init>(byte[],int,int)
+org.elasticsearch.client.http.nio.entity.NFileEntity#<init>(java.io.File)
+org.elasticsearch.client.http.nio.entity.NStringEntity#<init>(java.lang.String)
+org.elasticsearch.client.http.nio.entity.NStringEntity#<init>(java.lang.String,java.lang.String)

 @defaultMessage Use non-deprecated constructors
-org.apache.http.nio.entity.NFileEntity#<init>(java.io.File,java.lang.String)
-org.apache.http.nio.entity.NFileEntity#<init>(java.io.File,java.lang.String,boolean)
-org.apache.http.entity.FileEntity#<init>(java.io.File,java.lang.String)
-org.apache.http.entity.StringEntity#<init>(java.lang.String,java.lang.String,java.lang.String)
+org.elasticsearch.client.http.nio.entity.NFileEntity#<init>(java.io.File,java.lang.String)
+org.elasticsearch.client.http.nio.entity.NFileEntity#<init>(java.io.File,java.lang.String,boolean)
+org.elasticsearch.client.http.entity.FileEntity#<init>(java.io.File,java.lang.String)
+org.elasticsearch.client.http.entity.StringEntity#<init>(java.lang.String,java.lang.String,java.lang.String)

 @defaultMessage BasicEntity is easy to mess up and forget to set content type
-org.apache.http.entity.BasicHttpEntity#<init>()
+org.elasticsearch.client.http.entity.BasicHttpEntity#<init>()

 @defaultMessage EntityTemplate is easy to mess up and forget to set content type
-org.apache.http.entity.EntityTemplate#<init>(org.apache.http.entity.ContentProducer)
+org.elasticsearch.client.http.entity.EntityTemplate#<init>(org.elasticsearch.client.http.entity.ContentProducer)

 @defaultMessage SerializableEntity uses java serialization and makes it easy to forget to set content type
-org.apache.http.entity.SerializableEntity#<init>(java.io.Serializable)
+org.elasticsearch.client.http.entity.SerializableEntity#<init>(java.io.Serializable)
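These rewritten signatures ban the same no-ContentType entity constructors as before, now under the shaded org.elasticsearch.client.http package. A short sketch of the pattern the rule enforces, written against the unshaded Apache types (with the shaded client, only the package prefix changes):

import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.nio.entity.NStringEntity;

public class ExplicitContentTypeDemo {
    public static void main(String[] args) {
        String json = "{\"field\":\"value\"}";
        // Forbidden: new StringEntity(json), which defaults to text/plain and ISO-8859-1.
        // Allowed: pass the ContentType explicitly so the request advertises application/json.
        StringEntity blocking = new StringEntity(json, ContentType.APPLICATION_JSON);
        NStringEntity nonBlocking = new NStringEntity(json, ContentType.APPLICATION_JSON);
        System.out.println(blocking.getContentType().getValue());
        System.out.println(nonBlocking.getContentType().getValue());
    }
}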
@@ -1,5 +1,5 @@
 # When updating elasticsearch, please update 'rest' version in core/src/main/resources/org/elasticsearch/bootstrap/test-framework.policy
-elasticsearch = 6.0.0-beta1
+elasticsearch = 7.0.0-alpha1
 lucene = 7.0.0-snapshot-00142c9

 # optional dependencies
@@ -18,17 +18,17 @@
  */
 package org.elasticsearch.client.benchmark.rest;

-import org.apache.http.HttpEntity;
-import org.apache.http.HttpHeaders;
-import org.apache.http.HttpHost;
-import org.apache.http.HttpStatus;
-import org.apache.http.client.config.RequestConfig;
-import org.apache.http.conn.ConnectionKeepAliveStrategy;
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.StringEntity;
-import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
-import org.apache.http.message.BasicHeader;
-import org.apache.http.nio.entity.NStringEntity;
+import org.elasticsearch.client.http.HttpEntity;
+import org.elasticsearch.client.http.HttpHeaders;
+import org.elasticsearch.client.http.HttpHost;
+import org.elasticsearch.client.http.HttpStatus;
+import org.elasticsearch.client.http.client.config.RequestConfig;
+import org.elasticsearch.client.http.conn.ConnectionKeepAliveStrategy;
+import org.elasticsearch.client.http.entity.ContentType;
+import org.elasticsearch.client.http.entity.StringEntity;
+import org.elasticsearch.client.http.impl.nio.client.HttpAsyncClientBuilder;
+import org.elasticsearch.client.http.message.BasicHeader;
+import org.elasticsearch.client.http.nio.entity.NStringEntity;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.RestClient;
@@ -19,14 +19,14 @@

 package org.elasticsearch.client;

-import org.apache.http.HttpEntity;
-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpHead;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
-import org.apache.http.entity.ByteArrayEntity;
-import org.apache.http.entity.ContentType;
+import org.elasticsearch.client.http.HttpEntity;
+import org.elasticsearch.client.http.client.methods.HttpDelete;
+import org.elasticsearch.client.http.client.methods.HttpGet;
+import org.elasticsearch.client.http.client.methods.HttpHead;
+import org.elasticsearch.client.http.client.methods.HttpPost;
+import org.elasticsearch.client.http.client.methods.HttpPut;
+import org.elasticsearch.client.http.entity.ByteArrayEntity;
+import org.elasticsearch.client.http.entity.ContentType;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.bulk.BulkRequest;
@@ -19,8 +19,8 @@

 package org.elasticsearch.client;

-import org.apache.http.Header;
-import org.apache.http.HttpEntity;
+import org.elasticsearch.client.http.Header;
+import org.elasticsearch.client.http.HttpEntity;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.action.ActionListener;
@@ -19,8 +19,8 @@

 package org.elasticsearch.client;

-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.StringEntity;
+import org.elasticsearch.client.http.entity.ContentType;
+import org.elasticsearch.client.http.entity.StringEntity;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.action.DocWriteRequest;
@@ -19,19 +19,19 @@

 package org.elasticsearch.client;

-import org.apache.http.Header;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpHost;
-import org.apache.http.HttpResponse;
-import org.apache.http.ProtocolVersion;
-import org.apache.http.RequestLine;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.entity.ByteArrayEntity;
-import org.apache.http.entity.ContentType;
-import org.apache.http.message.BasicHeader;
-import org.apache.http.message.BasicHttpResponse;
-import org.apache.http.message.BasicRequestLine;
-import org.apache.http.message.BasicStatusLine;
+import org.elasticsearch.client.http.Header;
+import org.elasticsearch.client.http.HttpEntity;
+import org.elasticsearch.client.http.HttpHost;
+import org.elasticsearch.client.http.HttpResponse;
+import org.elasticsearch.client.http.ProtocolVersion;
+import org.elasticsearch.client.http.RequestLine;
+import org.elasticsearch.client.http.client.methods.HttpGet;
+import org.elasticsearch.client.http.entity.ByteArrayEntity;
+import org.elasticsearch.client.http.entity.ContentType;
+import org.elasticsearch.client.http.message.BasicHeader;
+import org.elasticsearch.client.http.message.BasicHttpResponse;
+import org.elasticsearch.client.http.message.BasicRequestLine;
+import org.elasticsearch.client.http.message.BasicStatusLine;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.Build;
 import org.elasticsearch.Version;

@@ -178,4 +178,4 @@ public class CustomRestHighLevelClientTests extends ESTestCase {
             return parseEntity(response.getEntity(), MainResponse::fromXContent);
         }
     }
-}
+}
@@ -19,7 +19,7 @@

 package org.elasticsearch.client;

-import org.apache.http.Header;
+import org.elasticsearch.client.http.Header;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.test.rest.ESRestTestCase;
@@ -19,9 +19,9 @@

 package org.elasticsearch.client;

-import org.apache.http.HttpEntity;
-import org.apache.http.entity.ByteArrayEntity;
-import org.apache.http.util.EntityUtils;
+import org.elasticsearch.client.http.HttpEntity;
+import org.elasticsearch.client.http.entity.ByteArrayEntity;
+import org.elasticsearch.client.http.util.EntityUtils;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkShardRequest;
@@ -19,9 +19,9 @@

 package org.elasticsearch.client;

-import org.apache.http.HttpEntity;
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.StringEntity;
+import org.elasticsearch.client.http.HttpEntity;
+import org.elasticsearch.client.http.entity.ContentType;
+import org.elasticsearch.client.http.entity.StringEntity;
 import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -20,20 +20,20 @@
 package org.elasticsearch.client;

 import com.fasterxml.jackson.core.JsonParseException;
-import org.apache.http.Header;
-import org.apache.http.HttpEntity;
-import org.apache.http.HttpHost;
-import org.apache.http.HttpResponse;
-import org.apache.http.ProtocolVersion;
-import org.apache.http.RequestLine;
-import org.apache.http.StatusLine;
-import org.apache.http.entity.ByteArrayEntity;
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.StringEntity;
-import org.apache.http.message.BasicHttpResponse;
-import org.apache.http.message.BasicRequestLine;
-import org.apache.http.message.BasicStatusLine;
-import org.apache.http.nio.entity.NStringEntity;
+import org.elasticsearch.client.http.Header;
+import org.elasticsearch.client.http.HttpEntity;
+import org.elasticsearch.client.http.HttpHost;
+import org.elasticsearch.client.http.HttpResponse;
+import org.elasticsearch.client.http.ProtocolVersion;
+import org.elasticsearch.client.http.RequestLine;
+import org.elasticsearch.client.http.StatusLine;
+import org.elasticsearch.client.http.entity.ByteArrayEntity;
+import org.elasticsearch.client.http.entity.ContentType;
+import org.elasticsearch.client.http.entity.StringEntity;
+import org.elasticsearch.client.http.message.BasicHttpResponse;
+import org.elasticsearch.client.http.message.BasicRequestLine;
+import org.elasticsearch.client.http.message.BasicStatusLine;
+import org.elasticsearch.client.http.nio.entity.NStringEntity;
 import org.elasticsearch.Build;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
@@ -19,10 +19,10 @@

 package org.elasticsearch.client;

-import org.apache.http.HttpEntity;
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.StringEntity;
-import org.apache.http.nio.entity.NStringEntity;
+import org.elasticsearch.client.http.HttpEntity;
+import org.elasticsearch.client.http.entity.ContentType;
+import org.elasticsearch.client.http.entity.StringEntity;
+import org.elasticsearch.client.http.nio.entity.NStringEntity;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.action.search.ClearScrollRequest;
@@ -19,11 +19,9 @@

 package org.elasticsearch.client.documentation;

-import org.apache.http.HttpEntity;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.entity.ContentType;
-import org.apache.http.nio.entity.NStringEntity;
 import org.elasticsearch.Build;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.DocWriteResponse;

@@ -38,6 +36,7 @@ import org.elasticsearch.action.get.GetRequest;
 import org.elasticsearch.action.get.GetResponse;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.main.MainResponse;
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.action.support.replication.ReplicationResponse;

@@ -46,6 +45,11 @@ import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.client.ESRestHighLevelClientTestCase;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.client.http.HttpEntity;
+import org.elasticsearch.client.http.client.methods.HttpPost;
+import org.elasticsearch.client.http.entity.ContentType;
+import org.elasticsearch.client.http.nio.entity.NStringEntity;
+import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
@@ -0,0 +1,68 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.documentation;

import org.elasticsearch.Build;
import org.elasticsearch.Version;
import org.elasticsearch.action.main.MainResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.cluster.ClusterName;

import java.io.IOException;

/**
 * This class is used to generate the Java Main API documentation.
 * You need to wrap your code between two tags like:
 * // tag::example[]
 * // end::example[]
 *
 * Where example is your tag name.
 *
 * Then in the documentation, you can extract what is between tag and end tags with
 * ["source","java",subs="attributes,callouts,macros"]
 * --------------------------------------------------
 * include-tagged::{doc-tests}/MainDocumentationIT.java[example]
 * --------------------------------------------------
 */
public class MainDocumentationIT extends ESRestHighLevelClientTestCase {

    public void testMain() throws IOException {
        RestHighLevelClient client = highLevelClient();
        {
            //tag::main-execute
            MainResponse response = client.info();
            //end::main-execute
            assertTrue(response.isAvailable());
            //tag::main-response
            ClusterName clusterName = response.getClusterName(); // <1>
            String clusterUuid = response.getClusterUuid(); // <2>
            String nodeName = response.getNodeName(); // <3>
            Version version = response.getVersion(); // <4>
            Build build = response.getBuild(); // <5>
            //end::main-response
            assertNotNull(clusterName);
            assertNotNull(clusterUuid);
            assertNotNull(nodeName);
            assertNotNull(version);
            assertNotNull(build);
        }
    }
}
@@ -0,0 +1,162 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client.documentation;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.delete.DeleteResponse;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.client.ESRestHighLevelClientTestCase;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.http.HttpEntity;
import org.elasticsearch.client.http.HttpStatus;
import org.elasticsearch.client.http.entity.ContentType;
import org.elasticsearch.client.http.nio.entity.NStringEntity;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.io.InputStream;
import java.util.Map;

import static java.util.Collections.emptyMap;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;

/**
 * This class is used to generate the documentation for the
 * docs/java-rest/high-level/migration.asciidoc page.
 *
 * You need to wrap your code between two tags like:
 * // tag::example[]
 * // end::example[]
 *
 * Where example is your tag name.
 *
 * Then in the documentation, you can extract what is between tag and end tags with
 * ["source","java",subs="attributes,callouts,macros"]
 * --------------------------------------------------
 * include-tagged::{doc-tests}/MigrationDocumentationIT.java[example]
 * --------------------------------------------------
 */
public class MigrationDocumentationIT extends ESRestHighLevelClientTestCase {

    public void testCreateIndex() throws IOException {
        RestClient restClient = client();
        {
            //tag::migration-create-inded
            Settings indexSettings = Settings.builder() // <1>
                    .put(SETTING_NUMBER_OF_SHARDS, 1)
                    .put(SETTING_NUMBER_OF_REPLICAS, 0)
                    .build();

            String payload = XContentFactory.jsonBuilder() // <2>
                    .startObject()
                        .startObject("settings") // <3>
                            .value(indexSettings)
                        .endObject()
                        .startObject("mappings") // <4>
                            .startObject("doc")
                                .startObject("properties")
                                    .startObject("time")
                                        .field("type", "date")
                                    .endObject()
                                .endObject()
                            .endObject()
                        .endObject()
                    .endObject().string();

            HttpEntity entity = new NStringEntity(payload, ContentType.APPLICATION_JSON); // <5>

            Response response = restClient.performRequest("PUT", "my-index", emptyMap(), entity); // <6>
            if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) {
                // <7>
            }
            //end::migration-create-inded
            assertEquals(200, response.getStatusLine().getStatusCode());
        }
    }

    public void testClusterHealth() throws IOException {
        RestClient restClient = client();
        {
            //tag::migration-cluster-health
            Response response = restClient.performRequest("GET", "/_cluster/health"); // <1>

            ClusterHealthStatus healthStatus;
            try (InputStream is = response.getEntity().getContent()) { // <2>
                Map<String, Object> map = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); // <3>
                healthStatus = ClusterHealthStatus.fromString((String) map.get("status")); // <4>
            }

            if (healthStatus == ClusterHealthStatus.GREEN) {
                // <5>
            }
            //end::migration-cluster-health
            assertSame(ClusterHealthStatus.GREEN, healthStatus);
        }
    }

    public void testRequests() throws IOException {
        RestHighLevelClient client = highLevelClient();
        {
            //tag::migration-request-ctor
            IndexRequest request = new IndexRequest("index", "doc", "id"); // <1>
            request.source("{\"field\":\"value\"}", XContentType.JSON);
            //end::migration-request-ctor

            //tag::migration-request-ctor-execution
            IndexResponse response = client.index(request);
            //end::migration-request-ctor-execution
            assertEquals(RestStatus.CREATED, response.status());
        }
        {
            //tag::migration-request-sync-execution
            DeleteRequest request = new DeleteRequest("index", "doc", "id");
            DeleteResponse response = client.delete(request); // <1>
            //end::migration-request-sync-execution
            assertEquals(RestStatus.OK, response.status());
        }
        {
            //tag::migration-request-async-execution
            DeleteRequest request = new DeleteRequest("index", "doc", "id"); // <1>
            client.deleteAsync(request, new ActionListener<DeleteResponse>() { // <2>
                @Override
                public void onResponse(DeleteResponse deleteResponse) {
                    // <3>
                }

                @Override
                public void onFailure(Exception e) {
                    // <4>
                }
            });
            //end::migration-request-async-execution
        }
    }
}
@@ -142,7 +142,7 @@ public class QueryDSLDocumentationTests extends ESTestCase {
         FilterFunctionBuilder[] functions = {
                 new FunctionScoreQueryBuilder.FilterFunctionBuilder(
                         matchQuery("name", "kimchy"), // <1>
-                        randomFunction("ABCDEF")), // <2>
+                        randomFunction()), // <2>
                 new FunctionScoreQueryBuilder.FilterFunctionBuilder(
                         exponentialDecayFunction("age", 0L, 1L)) // <3>
         };
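randomFunction no longer takes a seed argument here. Where reproducible scores are still needed, a seed can be attached on the builder instead; a hedged sketch, assuming the 6.x RandomScoreFunctionBuilder#seed setter:

import org.elasticsearch.index.query.functionscore.RandomScoreFunctionBuilder;

import static org.elasticsearch.index.query.functionscore.ScoreFunctionBuilders.randomFunction;

public class SeededRandomFunctionDemo {
    // Hypothetical helper: seed(...) keeps random_score deterministic across identical requests.
    public static RandomScoreFunctionBuilder seededRandom() {
        return randomFunction().seed("ABCDEF");
    }
}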
@@ -23,6 +23,7 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.index.IndexRequest;
+import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.action.search.ClearScrollRequest;
 import org.elasticsearch.action.search.ClearScrollResponse;
 import org.elasticsearch.action.search.SearchRequest;

@@ -33,8 +34,12 @@ import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.client.ESRestHighLevelClientTestCase;
 import org.elasticsearch.client.RestHighLevelClient;
+import org.elasticsearch.common.text.Text;
+import org.elasticsearch.common.unit.Fuzziness;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.index.query.MatchQueryBuilder;
+import org.elasticsearch.index.query.QueryBuilder;
 import org.elasticsearch.index.query.QueryBuilders;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.search.Scroll;

@@ -49,6 +54,14 @@ import org.elasticsearch.search.aggregations.bucket.terms.Terms.Bucket;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
 import org.elasticsearch.search.aggregations.metrics.avg.Avg;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
+import org.elasticsearch.search.fetch.subphase.highlight.HighlightField;
+import org.elasticsearch.search.profile.ProfileResult;
+import org.elasticsearch.search.profile.ProfileShardResult;
+import org.elasticsearch.search.profile.aggregation.AggregationProfileShardResult;
+import org.elasticsearch.search.profile.query.CollectorResult;
+import org.elasticsearch.search.profile.query.QueryProfileShardResult;
+import org.elasticsearch.search.sort.FieldSortBuilder;
 import org.elasticsearch.search.sort.ScoreSortBuilder;
 import org.elasticsearch.search.sort.SortOrder;
 import org.elasticsearch.search.suggest.Suggest;

@@ -65,6 +78,8 @@ import java.util.Map;
 import java.util.concurrent.TimeUnit;

+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;

 /**
@@ -100,7 +115,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
                 Arrays.asList("kimchy", "tanguy"), "innerObject", Collections.singletonMap("key", "value")));
             request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
             BulkResponse bulkResponse = client.bulk(request);
-            assertSame(bulkResponse.status(), RestStatus.OK);
+            assertSame(RestStatus.OK, bulkResponse.status());
             assertFalse(bulkResponse.hasFailures());
         }
         {
@@ -132,10 +147,24 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
             sourceBuilder.query(QueryBuilders.termQuery("user", "kimchy")); // <2>
             sourceBuilder.from(0); // <3>
             sourceBuilder.size(5); // <4>
-            sourceBuilder.sort(new ScoreSortBuilder().order(SortOrder.ASC));
             sourceBuilder.timeout(new TimeValue(60, TimeUnit.SECONDS)); // <5>
             // end::search-source-basics

+            // tag::search-source-sorting
+            sourceBuilder.sort(new ScoreSortBuilder().order(SortOrder.DESC)); // <1>
+            sourceBuilder.sort(new FieldSortBuilder("_uid").order(SortOrder.ASC)); // <2>
+            // end::search-source-sorting
+
+            // tag::search-source-filtering-off
+            sourceBuilder.fetchSource(false);
+            // end::search-source-filtering-off
+            // tag::search-source-filtering-includes
+            String[] includeFields = new String[] {"title", "user", "innerObject.*"};
+            String[] excludeFields = new String[] {"_type"};
+            sourceBuilder.fetchSource(includeFields, excludeFields);
+            // end::search-source-filtering-includes
+            sourceBuilder.fetchSource(true);
+
             // tag::search-source-setter
             SearchRequest searchRequest = new SearchRequest();
             searchRequest.source(sourceBuilder);
@@ -212,6 +241,33 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
         }
     }

+    @SuppressWarnings("unused")
+    public void testBuildingSearchQueries() {
+        RestHighLevelClient client = highLevelClient();
+        {
+            // tag::search-query-builder-ctor
+            MatchQueryBuilder matchQueryBuilder = new MatchQueryBuilder("user", "kimchy"); // <1>
+            // end::search-query-builder-ctor
+            // tag::search-query-builder-options
+            matchQueryBuilder.fuzziness(Fuzziness.AUTO); // <1>
+            matchQueryBuilder.prefixLength(3); // <2>
+            matchQueryBuilder.maxExpansions(10); // <3>
+            // end::search-query-builder-options
+        }
+        {
+            // tag::search-query-builders
+            QueryBuilder matchQueryBuilder = QueryBuilders.matchQuery("user", "kimchy")
+                                                          .fuzziness(Fuzziness.AUTO)
+                                                          .prefixLength(3)
+                                                          .maxExpansions(10);
+            // end::search-query-builders
+            SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
+            // tag::search-query-setter
+            searchSourceBuilder.query(matchQueryBuilder);
+            // end::search-query-setter
+        }
+    }
+
     @SuppressWarnings({ "unused" })
     public void testSearchRequestAggregations() throws IOException {
         RestHighLevelClient client = highLevelClient();
@@ -225,7 +281,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
                 .source(XContentType.JSON, "company", "Elastic", "age", 40));
             request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
             BulkResponse bulkResponse = client.bulk(request);
-            assertSame(bulkResponse.status(), RestStatus.OK);
+            assertSame(RestStatus.OK, bulkResponse.status());
             assertFalse(bulkResponse.hasFailures());
         }
         {
@@ -298,7 +354,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
             request.add(new IndexRequest("posts", "doc", "4").source(XContentType.JSON, "user", "cbuescher"));
             request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
             BulkResponse bulkResponse = client.bulk(request);
-            assertSame(bulkResponse.status(), RestStatus.OK);
+            assertSame(RestStatus.OK, bulkResponse.status());
             assertFalse(bulkResponse.hasFailures());
         }
         {
@@ -330,6 +386,137 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
         }
     }

+    public void testSearchRequestHighlighting() throws IOException {
+        RestHighLevelClient client = highLevelClient();
+        {
+            BulkRequest request = new BulkRequest();
+            request.add(new IndexRequest("posts", "doc", "1")
+                .source(XContentType.JSON, "title", "In which order are my Elasticsearch queries executed?", "user",
+                    Arrays.asList("kimchy", "luca"), "innerObject", Collections.singletonMap("key", "value")));
+            request.add(new IndexRequest("posts", "doc", "2")
+                .source(XContentType.JSON, "title", "Current status and upcoming changes in Elasticsearch", "user",
+                    Arrays.asList("kimchy", "christoph"), "innerObject", Collections.singletonMap("key", "value")));
+            request.add(new IndexRequest("posts", "doc", "3")
+                .source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch", "user",
+                    Arrays.asList("kimchy", "tanguy"), "innerObject", Collections.singletonMap("key", "value")));
+            request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
+            BulkResponse bulkResponse = client.bulk(request);
+            assertSame(RestStatus.OK, bulkResponse.status());
+            assertFalse(bulkResponse.hasFailures());
+        }
+        {
+            SearchRequest searchRequest = new SearchRequest();
+            // tag::search-request-highlighting
+            SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
+            HighlightBuilder highlightBuilder = new HighlightBuilder(); // <1>
+            HighlightBuilder.Field highlightTitle =
+                    new HighlightBuilder.Field("title"); // <2>
+            highlightTitle.highlighterType("unified"); // <3>
+            highlightBuilder.field(highlightTitle); // <4>
+            HighlightBuilder.Field highlightUser = new HighlightBuilder.Field("user");
+            highlightBuilder.field(highlightUser);
+            searchSourceBuilder.highlighter(highlightBuilder);
+            // end::search-request-highlighting
+            searchSourceBuilder.query(QueryBuilders.boolQuery()
+                    .should(matchQuery("title", "Elasticsearch"))
+                    .should(matchQuery("user", "kimchy")));
+            searchRequest.source(searchSourceBuilder);
+            SearchResponse searchResponse = client.search(searchRequest);
+            {
+                // tag::search-request-highlighting-get
+                SearchHits hits = searchResponse.getHits();
+                for (SearchHit hit : hits.getHits()) {
+                    Map<String, HighlightField> highlightFields = hit.getHighlightFields();
+                    HighlightField highlight = highlightFields.get("title"); // <1>
+                    Text[] fragments = highlight.fragments(); // <2>
+                    String fragmentString = fragments[0].string();
+                }
+                // end::search-request-highlighting-get
+                hits = searchResponse.getHits();
+                for (SearchHit hit : hits.getHits()) {
+                    Map<String, HighlightField> highlightFields = hit.getHighlightFields();
+                    HighlightField highlight = highlightFields.get("title");
+                    Text[] fragments = highlight.fragments();
+                    assertEquals(1, fragments.length);
+                    assertThat(fragments[0].string(), containsString("<em>Elasticsearch</em>"));
+                    highlight = highlightFields.get("user");
+                    fragments = highlight.fragments();
+                    assertEquals(1, fragments.length);
+                    assertThat(fragments[0].string(), containsString("<em>kimchy</em>"));
+                }
+            }
+        }
+    }
+
+    public void testSearchRequestProfiling() throws IOException {
+        RestHighLevelClient client = highLevelClient();
+        {
+            IndexRequest request = new IndexRequest("posts", "doc", "1")
+                    .source(XContentType.JSON, "tags", "elasticsearch", "comments", 123);
+            request.setRefreshPolicy(WriteRequest.RefreshPolicy.WAIT_UNTIL);
+            IndexResponse indexResponse = client.index(request);
+            assertSame(RestStatus.CREATED, indexResponse.status());
+        }
+        {
+            SearchRequest searchRequest = new SearchRequest();
+            // tag::search-request-profiling
+            SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
+            searchSourceBuilder.profile(true);
+            // end::search-request-profiling
+            searchSourceBuilder.query(QueryBuilders.termQuery("tags", "elasticsearch"));
+            searchSourceBuilder.aggregation(AggregationBuilders.histogram("by_comments").field("comments").interval(100));
+            searchRequest.source(searchSourceBuilder);
+
+            SearchResponse searchResponse = client.search(searchRequest);
+            // tag::search-request-profiling-get
+            Map<String, ProfileShardResult> profilingResults = searchResponse.getProfileResults(); // <1>
+            for (Map.Entry<String, ProfileShardResult> profilingResult : profilingResults.entrySet()) { // <2>
+                String key = profilingResult.getKey(); // <3>
+                ProfileShardResult profileShardResult = profilingResult.getValue(); // <4>
+            }
+            // end::search-request-profiling-get
+
+            ProfileShardResult profileShardResult = profilingResults.values().iterator().next();
+            assertNotNull(profileShardResult);
+
+            // tag::search-request-profiling-queries
+            List<QueryProfileShardResult> queryProfileShardResults = profileShardResult.getQueryProfileResults(); // <1>
+            for (QueryProfileShardResult queryProfileResult : queryProfileShardResults) { // <2>
+
+            }
+            // end::search-request-profiling-queries
+            assertThat(queryProfileShardResults.size(), equalTo(1));
+
+            for (QueryProfileShardResult queryProfileResult : queryProfileShardResults) {
+                // tag::search-request-profiling-queries-results
+                for (ProfileResult profileResult : queryProfileResult.getQueryResults()) { // <1>
+                    String queryName = profileResult.getQueryName(); // <2>
+                    long queryTimeInMillis = profileResult.getTime(); // <3>
+                    List<ProfileResult> profiledChildren = profileResult.getProfiledChildren(); // <4>
+                }
+                // end::search-request-profiling-queries-results
+
+                // tag::search-request-profiling-queries-collectors
+                CollectorResult collectorResult = queryProfileResult.getCollectorResult();  // <1>
+                String collectorName = collectorResult.getName();  // <2>
+                Long collectorTimeInMillis = collectorResult.getTime(); // <3>
+                List<CollectorResult> profiledChildren = collectorResult.getProfiledChildren(); // <4>
+                // end::search-request-profiling-queries-collectors
+            }
+
+            // tag::search-request-profiling-aggs
+            AggregationProfileShardResult aggsProfileResults = profileShardResult.getAggregationProfileResults(); // <1>
+            for (ProfileResult profileResult : aggsProfileResults.getProfileResults()) { // <2>
+                String aggName = profileResult.getQueryName(); // <3>
+                long aggTimeInMillis = profileResult.getTime(); // <4>
+                List<ProfileResult> profiledChildren = profileResult.getProfiledChildren(); // <5>
+            }
+            // end::search-request-profiling-aggs
+            assertThat(aggsProfileResults.getProfileResults().size(), equalTo(1));
+        }
+    }
+
     public void testScroll() throws IOException {
         RestHighLevelClient client = highLevelClient();
         {
@@ -342,7 +529,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
                 .source(XContentType.JSON, "title", "The Future of Federated Search in Elasticsearch"));
             request.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE);
             BulkResponse bulkResponse = client.bulk(request);
-            assertSame(bulkResponse.status(), RestStatus.OK);
+            assertSame(RestStatus.OK, bulkResponse.status());
            assertFalse(bulkResponse.hasFailures());
         }
         {
@ -19,6 +19,31 @@
|
||||
|
||||
import org.elasticsearch.gradle.precommit.PrecommitTasks
|
||||
|
||||
/**
|
||||
* The rest client is a shaded jar. It contains the source of the rest client, as well as all the dependencies,
|
||||
* shaded to the `org.elasticsearch.client` package. 2 artifacts come out of this build process. The shading process
|
||||
* only modifies the imports and class names and locations. It does not do any processing on the files. The classes used
|
||||
* to interact with the rest client are no different from the dependencies in the shade configuration, besides in name.
|
||||
*
|
||||
* IDEs do not like removing artifacts and changing configurations on the fly, so the bits that make the build use the
|
||||
* actual shaded jar (2) are only executed on the cli. Tests run in an IDE rely on the deps (1) jar.
|
||||
*
|
||||
* 1) A jar that contains *only* the `org.elasticsearch.client` shaded dependencies. This is a jar that is built before
|
||||
* the src is compiled. This jar is only used by the rest client so will compile. There exists a chicken-egg
|
||||
* situation where the src needs compilation and depends on `org.elasticsearch.client` shaded classes, so an
|
||||
* intermediary jar needs to exist to satisfy the compile. The `deps` classifier is added to this jar.
|
||||
* 2) The *actual* jar that will be used by clients. This has no classifier, contains the rest client src and
|
||||
* `org.elasticsearch.client`. This jar is the only actual output artifact of this job.
|
||||
*/
|
||||
buildscript {
|
||||
repositories {
|
||||
jcenter()
|
||||
}
|
||||
dependencies {
|
||||
classpath 'com.github.jengelman.gradle.plugins:shadow:2.0.1'
|
||||
}
|
||||
}
|
||||
|
||||
apply plugin: 'elasticsearch.build'
|
||||
apply plugin: 'ru.vyarus.animalsniffer'
|
||||
apply plugin: 'nebula.maven-base-publish'
|
||||
@ -38,15 +63,74 @@ publishing {
    }
}

dependencies {
    compile "org.apache.httpcomponents:httpclient:${versions.httpclient}"
    compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"
    compile "org.apache.httpcomponents:httpasyncclient:${versions.httpasyncclient}"
    compile "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}"
    compile "commons-codec:commons-codec:${versions.commonscodec}"
    compile "commons-logging:commons-logging:${versions.commonslogging}"
configurations {
    // holds the dependencies that get relocated into the shaded jars;
    // transitive = false so only the artifacts listed below are shaded
    shade {
        transitive = false
    }
}

    testCompile "org.elasticsearch.client:test:${version}"
// Useful for build time dependencies, as it is generated before compilation of the source in the rest client.
// This cannot be used as the final shaded jar, as it will contain the compiled source and dependencies
File shadedDir = file("${buildDir}/shaded")
// This directory exists so that the shadeDeps task produces an output, so we can add it (below) to the source set.
File shadedSrcDir = file("${buildDir}/generated-dummy-shaded")
task shadeDeps(type: com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar) {
    destinationDir = shadedDir
    configurations = [project.configurations.shade]
    classifier = 'deps'
    // e.g. org.apache.http.HttpHost becomes org.elasticsearch.client.http.HttpHost
    relocate 'org.apache', 'org.elasticsearch.client'

    doLast {
        shadedSrcDir.mkdir()
    }
}

jar {
    // merge the relocated dependency classes into the final, classifier-less jar
    from zipTree(shadeDeps.outputs.files.singleFile)
    dependsOn shadeDeps
}

// remove the deps jar from the classpath to avoid jarHell
if (isIdea == false && isEclipse == false) {
    // cleanup to remove the deps jar from the classpath
    if (gradle.gradleVersion == "3.3") {
        configurations.runtime.extendsFrom -= [configurations.compile]
    } else if (gradle.gradleVersion > "3.3") {
        configurations.runtimeElements.extendsFrom = []
    }
}

if (isEclipse) {
    // in eclipse the project is under a fake root, so we need to change around the source sets
    sourceSets {
        if (project.path == ":client:rest") {
            main.java.srcDirs = ['java']
            //main.resources.srcDirs = ['resources']
        } else {
            test.java.srcDirs = ['java']
            test.resources.srcDirs = ['resources']
        }
    }
}
// adds a dependency to compile, so the -deps jar is built first
sourceSets.main.output.dir(shadedSrcDir, builtBy: 'shadeDeps')

dependencies {
    shade "org.apache.httpcomponents:httpclient:${versions.httpclient}"
    shade "org.apache.httpcomponents:httpcore:${versions.httpcore}"
    shade "org.apache.httpcomponents:httpasyncclient:${versions.httpasyncclient}"
    shade "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}"
    shade "commons-codec:commons-codec:${versions.commonscodec}"
    shade "commons-logging:commons-logging:${versions.commonslogging}"

    // compile against the -deps jar produced by shadeDeps, so the shaded package names resolve
    compile shadeDeps.outputs.files

    if (isEclipse == false || project.path == ":client:rest-tests") {
        testCompile("org.elasticsearch.client:test:${version}") {
            // tests use the locally compiled version of core
            exclude group: 'org.elasticsearch', module: 'elasticsearch'
        }
    }
    testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
    testCompile "junit:junit:${versions.junit}"
    testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
@ -56,6 +140,16 @@ dependencies {
    signature "org.codehaus.mojo.signature:java17:1.0@signature"
}

// Set exported=true for the generated rest client deps since it is used by other projects in eclipse.
// https://docs.gradle.org/3.3/userguide/eclipse_plugin.html#sec:eclipse_modify_domain_objects
eclipse.classpath.file {
    whenMerged { classpath ->
        classpath.entries.findAll { entry -> entry.path.contains("elasticsearch-rest-client") }*.exported = true
    }
}

dependencyLicenses.dependencies = project.configurations.shade

forbiddenApisMain {
    //client does not depend on core, so only jdk and http signatures should be checked
    signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt'),
@ -72,7 +166,7 @@ forbiddenApisTest {
}

//JarHell is part of es core, which we don't want to pull in
jarHell.enabled=false
jarHell.enabled = false

namingConventions {
    testClass = 'org.elasticsearch.client.RestClientTestCase'
@ -82,13 +176,13 @@ namingConventions {

thirdPartyAudit.excludes = [
    //commons-logging optional dependencies
    'org.apache.avalon.framework.logger.Logger',
    'org.apache.log.Hierarchy',
    'org.apache.log.Logger',
    'org.apache.log4j.Category',
    'org.apache.log4j.Level',
    'org.apache.log4j.Logger',
    'org.apache.log4j.Priority',
    'org.elasticsearch.client.avalon.framework.logger.Logger',
    'org.elasticsearch.client.log.Hierarchy',
    'org.elasticsearch.client.log.Logger',
    'org.elasticsearch.client.log4j.Category',
    'org.elasticsearch.client.log4j.Level',
    'org.elasticsearch.client.log4j.Logger',
    'org.elasticsearch.client.log4j.Priority',
    //commons-logging provided dependencies
    'javax.servlet.ServletContextEvent',
    'javax.servlet.ServletContextListener'

2
client/rest/src/main/eclipse-build.gradle
Normal file
@ -0,0 +1,2 @@
// this is just a shell gradle file for eclipse, to have separate projects for src and tests
apply from: '../../build.gradle'
@ -19,24 +19,24 @@

package org.elasticsearch.client;

import org.apache.http.ContentTooLongException;
import org.apache.http.HttpEntity;
import org.apache.http.HttpException;
import org.apache.http.HttpResponse;
import org.apache.http.entity.ContentType;
import org.apache.http.nio.ContentDecoder;
import org.apache.http.nio.IOControl;
import org.apache.http.nio.entity.ContentBufferEntity;
import org.apache.http.nio.protocol.AbstractAsyncResponseConsumer;
import org.apache.http.nio.util.ByteBufferAllocator;
import org.apache.http.nio.util.HeapByteBufferAllocator;
import org.apache.http.nio.util.SimpleInputBuffer;
import org.apache.http.protocol.HttpContext;
import org.elasticsearch.client.http.ContentTooLongException;
import org.elasticsearch.client.http.HttpEntity;
import org.elasticsearch.client.http.HttpException;
import org.elasticsearch.client.http.HttpResponse;
import org.elasticsearch.client.http.entity.ContentType;
import org.elasticsearch.client.http.nio.ContentDecoder;
import org.elasticsearch.client.http.nio.IOControl;
import org.elasticsearch.client.http.nio.entity.ContentBufferEntity;
import org.elasticsearch.client.http.nio.protocol.AbstractAsyncResponseConsumer;
import org.elasticsearch.client.http.nio.util.ByteBufferAllocator;
import org.elasticsearch.client.http.nio.util.HeapByteBufferAllocator;
import org.elasticsearch.client.http.nio.util.SimpleInputBuffer;
import org.elasticsearch.client.http.protocol.HttpContext;

import java.io.IOException;

/**
 * Default implementation of {@link org.apache.http.nio.protocol.HttpAsyncResponseConsumer}. Buffers the whole
 * Default implementation of {@link org.elasticsearch.client.http.nio.protocol.HttpAsyncResponseConsumer}. Buffers the whole
 * response content in heap memory, meaning that the size of the buffer is equal to the content-length of the response.
 * Limits the size of responses that can be read based on a configurable argument. Throws an exception in case the entity is longer
 * than the configured buffer limit.
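A hedged usage sketch of the buffer limit described above: the consumer factory and the performRequest
overload are from the low-level client API in this change, while the restClient instance, the endpoint,
and the 100 MB figure are illustrative assumptions.

    import java.util.Collections;
    import org.elasticsearch.client.http.HttpEntity;

    // raise the per-response heap buffer cap from the default to 100 MB
    HttpAsyncResponseConsumerFactory consumerFactory =
            new HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory(100 * 1024 * 1024);
    Response response = restClient.performRequest("GET", "/_search",
            Collections.<String, String>emptyMap(), (HttpEntity) null, consumerFactory);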
@ -19,8 +19,8 @@

package org.elasticsearch.client;

import org.apache.http.HttpResponse;
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
import org.elasticsearch.client.http.HttpResponse;
import org.elasticsearch.client.http.nio.protocol.HttpAsyncResponseConsumer;

import static org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory.DEFAULT_BUFFER_LIMIT;

@ -18,8 +18,8 @@
 */
package org.elasticsearch.client;

import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
import org.elasticsearch.client.http.client.methods.HttpDelete;
import org.elasticsearch.client.http.client.methods.HttpEntityEnclosingRequestBase;

import java.net.URI;

@ -18,8 +18,8 @@
 */
package org.elasticsearch.client;

import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
import org.apache.http.client.methods.HttpGet;
import org.elasticsearch.client.http.client.methods.HttpEntityEnclosingRequestBase;
import org.elasticsearch.client.http.client.methods.HttpGet;

import java.net.URI;

@ -38,4 +38,4 @@ final class HttpGetWithEntity extends HttpEntityEnclosingRequestBase {
    public String getMethod() {
        return METHOD_NAME;
    }
}
}
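A hedged sketch of the pattern the two classes above follow: any HTTP verb can be given a request
body by extending HttpEntityEnclosingRequestBase and pinning the method name. The class and verb here
are hypothetical illustrations, not part of the diff; the imports use the shaded package.

    import java.net.URI;
    import org.elasticsearch.client.http.client.methods.HttpEntityEnclosingRequestBase;

    final class HttpPurgeWithEntity extends HttpEntityEnclosingRequestBase {
        static final String METHOD_NAME = "PURGE";

        HttpPurgeWithEntity(URI uri) {
            setURI(uri);
        }

        @Override
        public String getMethod() {
            // the fixed verb is the only thing that distinguishes this request class
            return METHOD_NAME;
        }
    }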

@ -19,18 +19,18 @@

package org.elasticsearch.client;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpEntityEnclosingRequest;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.RequestLine;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.entity.BufferedHttpEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.commons.logging.Log;
import org.elasticsearch.client.commons.logging.LogFactory;
import org.elasticsearch.client.http.Header;
import org.elasticsearch.client.http.HttpEntity;
import org.elasticsearch.client.http.HttpEntityEnclosingRequest;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.http.HttpResponse;
import org.elasticsearch.client.http.RequestLine;
import org.elasticsearch.client.http.client.methods.HttpUriRequest;
import org.elasticsearch.client.http.entity.BufferedHttpEntity;
import org.elasticsearch.client.http.entity.ContentType;
import org.elasticsearch.client.http.util.EntityUtils;

import java.io.BufferedReader;
import java.io.IOException;

@ -19,12 +19,12 @@

package org.elasticsearch.client;

import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.RequestLine;
import org.apache.http.StatusLine;
import org.elasticsearch.client.http.Header;
import org.elasticsearch.client.http.HttpEntity;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.http.HttpResponse;
import org.elasticsearch.client.http.RequestLine;
import org.elasticsearch.client.http.StatusLine;

import java.util.Objects;

@ -19,9 +19,9 @@

package org.elasticsearch.client;

import org.apache.http.HttpEntity;
import org.apache.http.entity.BufferedHttpEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.http.HttpEntity;
import org.elasticsearch.client.http.entity.BufferedHttpEntity;
import org.elasticsearch.client.http.util.EntityUtils;

import java.io.IOException;

@ -18,32 +18,32 @@
 */
package org.elasticsearch.client;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.HttpRequest;
import org.apache.http.HttpResponse;
import org.apache.http.client.AuthCache;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.methods.HttpEntityEnclosingRequestBase;
import org.apache.http.client.methods.HttpHead;
import org.apache.http.client.methods.HttpOptions;
import org.apache.http.client.methods.HttpPatch;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpRequestBase;
import org.apache.http.client.methods.HttpTrace;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.concurrent.FutureCallback;
import org.apache.http.impl.auth.BasicScheme;
import org.apache.http.impl.client.BasicAuthCache;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.nio.client.methods.HttpAsyncMethods;
import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
import org.elasticsearch.client.commons.logging.Log;
import org.elasticsearch.client.commons.logging.LogFactory;
import org.elasticsearch.client.http.Header;
import org.elasticsearch.client.http.HttpEntity;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.http.HttpRequest;
import org.elasticsearch.client.http.HttpResponse;
import org.elasticsearch.client.http.client.AuthCache;
import org.elasticsearch.client.http.client.ClientProtocolException;
import org.elasticsearch.client.http.client.methods.HttpEntityEnclosingRequestBase;
import org.elasticsearch.client.http.client.methods.HttpHead;
import org.elasticsearch.client.http.client.methods.HttpOptions;
import org.elasticsearch.client.http.client.methods.HttpPatch;
import org.elasticsearch.client.http.client.methods.HttpPost;
import org.elasticsearch.client.http.client.methods.HttpPut;
import org.elasticsearch.client.http.client.methods.HttpRequestBase;
import org.elasticsearch.client.http.client.methods.HttpTrace;
import org.elasticsearch.client.http.client.protocol.HttpClientContext;
import org.elasticsearch.client.http.client.utils.URIBuilder;
import org.elasticsearch.client.http.concurrent.FutureCallback;
import org.elasticsearch.client.http.impl.auth.BasicScheme;
import org.elasticsearch.client.http.impl.client.BasicAuthCache;
import org.elasticsearch.client.http.impl.nio.client.CloseableHttpAsyncClient;
import org.elasticsearch.client.http.nio.client.methods.HttpAsyncMethods;
import org.elasticsearch.client.http.nio.protocol.HttpAsyncRequestProducer;
import org.elasticsearch.client.http.nio.protocol.HttpAsyncResponseConsumer;

import java.io.Closeable;
import java.io.IOException;

@ -19,14 +19,14 @@

package org.elasticsearch.client;

import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.nio.conn.SchemeIOSessionStrategy;
import org.elasticsearch.client.http.Header;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.http.client.config.RequestConfig;
import org.elasticsearch.client.http.impl.client.CloseableHttpClient;
import org.elasticsearch.client.http.impl.client.HttpClientBuilder;
import org.elasticsearch.client.http.impl.nio.client.CloseableHttpAsyncClient;
import org.elasticsearch.client.http.impl.nio.client.HttpAsyncClientBuilder;
import org.elasticsearch.client.http.nio.conn.SchemeIOSessionStrategy;

import java.security.AccessController;
import java.security.PrivilegedAction;
@ -34,8 +34,8 @@ import java.util.Objects;

/**
 * Helps creating a new {@link RestClient}. Allows to set the most common http client configuration options when internally
 * creating the underlying {@link org.apache.http.nio.client.HttpAsyncClient}. Also allows to provide an externally created
 * {@link org.apache.http.nio.client.HttpAsyncClient} in case additional customization is needed.
 * creating the underlying {@link org.elasticsearch.client.http.nio.client.HttpAsyncClient}. Also allows to provide an externally created
 * {@link org.elasticsearch.client.http.nio.client.HttpAsyncClient} in case additional customization is needed.
 */
public final class RestClientBuilder {
    public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 1000;
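A hedged usage sketch for the builder described in the javadoc above; the host and timeout values are
illustrative assumptions, and the imports use the shaded package introduced by this change.

    import org.elasticsearch.client.http.HttpHost;
    import org.elasticsearch.client.http.client.config.RequestConfig;

    RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200, "http"))
            .setRequestConfigCallback(new RestClientBuilder.RequestConfigCallback() {
                @Override
                public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder requestConfigBuilder) {
                    // tweak timeouts without losing the builder's other defaults
                    return requestConfigBuilder.setConnectTimeout(5000).setSocketTimeout(60000);
                }
            })
            .build();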

@ -202,11 +202,18 @@ public final class RestClientBuilder {

        HttpAsyncClientBuilder httpClientBuilder = HttpAsyncClientBuilder.create().setDefaultRequestConfig(requestConfigBuilder.build())
            //default settings for connection pooling may be too constraining
            .setMaxConnPerRoute(DEFAULT_MAX_CONN_PER_ROUTE).setMaxConnTotal(DEFAULT_MAX_CONN_TOTAL);
            .setMaxConnPerRoute(DEFAULT_MAX_CONN_PER_ROUTE).setMaxConnTotal(DEFAULT_MAX_CONN_TOTAL).useSystemProperties();
        if (httpClientConfigCallback != null) {
            httpClientBuilder = httpClientConfigCallback.customizeHttpClient(httpClientBuilder);
        }
        return httpClientBuilder.build();

        final HttpAsyncClientBuilder finalBuilder = httpClientBuilder;
        return AccessController.doPrivileged(new PrivilegedAction<CloseableHttpAsyncClient>() {
            @Override
            public CloseableHttpAsyncClient run() {
                return finalBuilder.build();
            }
        });
    }

    /**
@ -230,7 +237,7 @@ public final class RestClientBuilder {
    public interface HttpClientConfigCallback {
        /**
         * Allows to customize the {@link CloseableHttpAsyncClient} being created and used by the {@link RestClient}.
         * Commonly used to customize the default {@link org.apache.http.client.CredentialsProvider} for authentication
         * Commonly used to customize the default {@link org.elasticsearch.client.http.client.CredentialsProvider} for authentication
         * or the {@link SchemeIOSessionStrategy} for communication through ssl without losing any other useful default
         * value that the {@link RestClientBuilder} internally sets, like connection pooling.
         */
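A hedged sketch of the credentials-provider customization the javadoc above mentions; the host,
username, and password are placeholder assumptions, and the imports use the shaded package.

    import org.elasticsearch.client.http.HttpHost;
    import org.elasticsearch.client.http.auth.AuthScope;
    import org.elasticsearch.client.http.auth.UsernamePasswordCredentials;
    import org.elasticsearch.client.http.client.CredentialsProvider;
    import org.elasticsearch.client.http.impl.client.BasicCredentialsProvider;
    import org.elasticsearch.client.http.impl.nio.client.HttpAsyncClientBuilder;

    final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
    credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials("user", "password"));
    RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200, "http"))
            .setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
                @Override
                public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) {
                    // only the credentials provider changes; pooling defaults are preserved
                    return httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider);
                }
            })
            .build();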

6
client/rest/src/test/eclipse-build.gradle
Normal file
@ -0,0 +1,6 @@
// this is just a shell gradle file for eclipse, to have separate projects for src and tests
apply from: '../../build.gradle'

dependencies {
    testCompile project(':client:rest')
}
@ -19,14 +19,14 @@

package org.elasticsearch.client;

import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.RequestLine;
import org.apache.http.StatusLine;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicRequestLine;
import org.apache.http.message.BasicStatusLine;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.http.HttpResponse;
import org.elasticsearch.client.http.ProtocolVersion;
import org.elasticsearch.client.http.RequestLine;
import org.elasticsearch.client.http.StatusLine;
import org.elasticsearch.client.http.message.BasicHttpResponse;
import org.elasticsearch.client.http.message.BasicRequestLine;
import org.elasticsearch.client.http.message.BasicStatusLine;

import java.util.concurrent.atomic.AtomicReference;

@ -19,19 +19,19 @@

package org.elasticsearch.client;

import org.apache.http.ContentTooLongException;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.StatusLine;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.ContentDecoder;
import org.apache.http.nio.IOControl;
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
import org.apache.http.protocol.HttpContext;
import org.elasticsearch.client.http.ContentTooLongException;
import org.elasticsearch.client.http.HttpEntity;
import org.elasticsearch.client.http.HttpResponse;
import org.elasticsearch.client.http.ProtocolVersion;
import org.elasticsearch.client.http.StatusLine;
import org.elasticsearch.client.http.entity.ContentType;
import org.elasticsearch.client.http.entity.StringEntity;
import org.elasticsearch.client.http.message.BasicHttpResponse;
import org.elasticsearch.client.http.message.BasicStatusLine;
import org.elasticsearch.client.http.nio.ContentDecoder;
import org.elasticsearch.client.http.nio.IOControl;
import org.elasticsearch.client.http.nio.protocol.HttpAsyncResponseConsumer;
import org.elasticsearch.client.http.protocol.HttpContext;

import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;

@ -19,7 +19,7 @@

package org.elasticsearch.client;

import org.apache.http.HttpHost;
import org.elasticsearch.client.http.HttpHost;

import java.util.HashSet;
import java.util.Set;
@ -48,4 +48,4 @@ class HostsTrackingFailureListener extends RestClient.FailureListener {
    void assertNotCalled() {
        assertEquals(0, hosts.size());
    }
}
}

@ -19,27 +19,27 @@

package org.elasticsearch.client;

import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpEntityEnclosingRequest;
import org.apache.http.HttpHost;
import org.apache.http.ProtocolVersion;
import org.apache.http.client.methods.HttpHead;
import org.apache.http.client.methods.HttpOptions;
import org.apache.http.client.methods.HttpPatch;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpTrace;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.InputStreamEntity;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHeader;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.entity.NByteArrayEntity;
import org.apache.http.nio.entity.NStringEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.http.Header;
import org.elasticsearch.client.http.HttpEntity;
import org.elasticsearch.client.http.HttpEntityEnclosingRequest;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.http.ProtocolVersion;
import org.elasticsearch.client.http.client.methods.HttpHead;
import org.elasticsearch.client.http.client.methods.HttpOptions;
import org.elasticsearch.client.http.client.methods.HttpPatch;
import org.elasticsearch.client.http.client.methods.HttpPost;
import org.elasticsearch.client.http.client.methods.HttpPut;
import org.elasticsearch.client.http.client.methods.HttpTrace;
import org.elasticsearch.client.http.client.methods.HttpUriRequest;
import org.elasticsearch.client.http.entity.ContentType;
import org.elasticsearch.client.http.entity.InputStreamEntity;
import org.elasticsearch.client.http.entity.StringEntity;
import org.elasticsearch.client.http.message.BasicHeader;
import org.elasticsearch.client.http.message.BasicHttpResponse;
import org.elasticsearch.client.http.message.BasicStatusLine;
import org.elasticsearch.client.http.nio.entity.NByteArrayEntity;
import org.elasticsearch.client.http.nio.entity.NStringEntity;
import org.elasticsearch.client.http.util.EntityUtils;

import java.io.ByteArrayInputStream;
import java.io.IOException;

@ -19,19 +19,19 @@

package org.elasticsearch.client;

import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.RequestLine;
import org.apache.http.StatusLine;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.InputStreamEntity;
import org.apache.http.entity.StringEntity;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicRequestLine;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.http.HttpEntity;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.http.HttpResponse;
import org.elasticsearch.client.http.ProtocolVersion;
import org.elasticsearch.client.http.RequestLine;
import org.elasticsearch.client.http.StatusLine;
import org.elasticsearch.client.http.entity.ContentType;
import org.elasticsearch.client.http.entity.InputStreamEntity;
import org.elasticsearch.client.http.entity.StringEntity;
import org.elasticsearch.client.http.message.BasicHttpResponse;
import org.elasticsearch.client.http.message.BasicRequestLine;
import org.elasticsearch.client.http.message.BasicStatusLine;
import org.elasticsearch.client.http.util.EntityUtils;

import java.io.ByteArrayInputStream;
import java.io.IOException;

@ -0,0 +1,119 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.client;

import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpsConfigurator;
import com.sun.net.httpserver.HttpsServer;
import org.elasticsearch.client.http.HttpHost;
import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
import org.elasticsearch.mocksocket.MockHttpServer;
import org.junit.AfterClass;
import org.junit.BeforeClass;

import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.SSLContext;
import javax.net.ssl.TrustManagerFactory;
import java.io.IOException;
import java.io.InputStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.security.KeyStore;

import static org.hamcrest.Matchers.containsString;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;

/**
 * Integration test to validate the builder builds a client with the correct configuration
 */
//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
@IgnoreJRERequirement
public class RestClientBuilderIntegTests extends RestClientTestCase {

    private static HttpsServer httpsServer;

    @BeforeClass
    public static void startHttpServer() throws Exception {
        httpsServer = MockHttpServer.createHttps(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
        httpsServer.setHttpsConfigurator(new HttpsConfigurator(getSslContext()));
        httpsServer.createContext("/", new ResponseHandler());
        httpsServer.start();
    }

    //animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
    @IgnoreJRERequirement
    private static class ResponseHandler implements HttpHandler {
        @Override
        public void handle(HttpExchange httpExchange) throws IOException {
            httpExchange.sendResponseHeaders(200, -1);
            httpExchange.close();
        }
    }

    @AfterClass
    public static void stopHttpServers() throws IOException {
        httpsServer.stop(0);
        httpsServer = null;
    }

    public void testBuilderUsesDefaultSSLContext() throws Exception {
        final SSLContext defaultSSLContext = SSLContext.getDefault();
        try {
            try (RestClient client = buildRestClient()) {
                try {
                    client.performRequest("GET", "/");
                    fail("connection should have been rejected due to SSL handshake");
                } catch (Exception e) {
                    assertThat(e.getMessage(), containsString("General SSLEngine problem"));
                }
            }

            SSLContext.setDefault(getSslContext());
            try (RestClient client = buildRestClient()) {
                Response response = client.performRequest("GET", "/");
                assertEquals(200, response.getStatusLine().getStatusCode());
            }
        } finally {
            SSLContext.setDefault(defaultSSLContext);
        }
    }

    private RestClient buildRestClient() {
        InetSocketAddress address = httpsServer.getAddress();
        return RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "https")).build();
    }

    private static SSLContext getSslContext() throws Exception {
        SSLContext sslContext = SSLContext.getInstance("TLS");
        try (InputStream in = RestClientBuilderIntegTests.class.getResourceAsStream("/testks.jks")) {
            KeyStore keyStore = KeyStore.getInstance("JKS");
            keyStore.load(in, "password".toCharArray());
            KeyManagerFactory kmf = KeyManagerFactory.getInstance("SunX509");
            kmf.init(keyStore, "password".toCharArray());
            TrustManagerFactory tmf = TrustManagerFactory.getInstance("SunX509");
            tmf.init(keyStore);
            sslContext.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null);
        }
        return sslContext;
    }
}
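A hedged sketch of wiring a custom SSLContext (such as the one built from testks.jks above) into the
client instead of swapping the JVM-wide default; setSSLContext is a standard HttpAsyncClientBuilder
method, and the host, port, and sslContext variable are illustrative assumptions.

    // given an SSLContext named sslContext, e.g. the one returned by getSslContext() above
    RestClient restClient = RestClient.builder(new HttpHost("localhost", 9243, "https"))
            .setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
                @Override
                public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpClientBuilder) {
                    return httpClientBuilder.setSSLContext(sslContext);
                }
            })
            .build();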
@ -19,11 +19,11 @@

package org.elasticsearch.client;

import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.message.BasicHeader;
import org.elasticsearch.client.http.Header;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.http.client.config.RequestConfig;
import org.elasticsearch.client.http.impl.nio.client.HttpAsyncClientBuilder;
import org.elasticsearch.client.http.message.BasicHeader;

import java.io.IOException;

@ -22,7 +22,7 @@ package org.elasticsearch.client;
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpServer;
import org.apache.http.HttpHost;
import org.elasticsearch.client.http.HttpHost;
import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
import org.elasticsearch.mocksocket.MockHttpServer;
import org.junit.AfterClass;
@ -45,7 +45,7 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

/**
 * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}.
 * Integration test to check interaction between {@link RestClient} and {@link org.elasticsearch.client.http.client.HttpClient}.
 * Works against real http servers, multiple hosts. Also tests failover by randomly shutting down hosts.
 */
//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes

@ -20,21 +20,21 @@
package org.elasticsearch.client;

import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.StatusLine;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.concurrent.FutureCallback;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.impl.auth.BasicScheme;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
import org.elasticsearch.client.http.Header;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.http.HttpResponse;
import org.elasticsearch.client.http.ProtocolVersion;
import org.elasticsearch.client.http.StatusLine;
import org.elasticsearch.client.http.client.methods.HttpUriRequest;
import org.elasticsearch.client.http.client.protocol.HttpClientContext;
import org.elasticsearch.client.http.concurrent.FutureCallback;
import org.elasticsearch.client.http.conn.ConnectTimeoutException;
import org.elasticsearch.client.http.impl.auth.BasicScheme;
import org.elasticsearch.client.http.impl.nio.client.CloseableHttpAsyncClient;
import org.elasticsearch.client.http.message.BasicHttpResponse;
import org.elasticsearch.client.http.message.BasicStatusLine;
import org.elasticsearch.client.http.nio.protocol.HttpAsyncRequestProducer;
import org.elasticsearch.client.http.nio.protocol.HttpAsyncResponseConsumer;
import org.junit.Before;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

@ -23,16 +23,16 @@ import com.sun.net.httpserver.Headers;
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpServer;
import org.apache.http.Consts;
import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.http.Consts;
import org.elasticsearch.client.http.Header;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.http.auth.AuthScope;
import org.elasticsearch.client.http.auth.UsernamePasswordCredentials;
import org.elasticsearch.client.http.entity.ContentType;
import org.elasticsearch.client.http.entity.StringEntity;
import org.elasticsearch.client.http.impl.client.BasicCredentialsProvider;
import org.elasticsearch.client.http.impl.nio.client.HttpAsyncClientBuilder;
import org.elasticsearch.client.http.util.EntityUtils;
import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
import org.elasticsearch.mocksocket.MockHttpServer;
import org.junit.AfterClass;
@ -60,7 +60,7 @@ import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;

/**
 * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}.
 * Integration test to check interaction between {@link RestClient} and {@link org.elasticsearch.client.http.client.HttpClient}.
 * Works against a real http server, one single host.
 */
//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes
@ -164,7 +164,7 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase {

    /**
     * End to end test for headers. We test it explicitly against a real http client as there are different ways
     * to set/add headers to the {@link org.apache.http.client.HttpClient}.
     * to set/add headers to the {@link org.elasticsearch.client.http.client.HttpClient}.
     * Exercises the test http server ability to send back whatever headers it received.
     */
    public void testHeaders() throws IOException {
@ -198,7 +198,7 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase {

    /**
     * End to end test for delete with body. We test it explicitly as it is not supported
     * out of the box by {@link org.apache.http.client.HttpClient}.
     * out of the box by {@link org.elasticsearch.client.http.client.HttpClient}.
     * Exercises the test http server ability to send back whatever body it received.
     */
    public void testDeleteWithBody() throws IOException {
@ -207,7 +207,7 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase {

    /**
     * End to end test for get with body. We test it explicitly as it is not supported
     * out of the box by {@link org.apache.http.client.HttpClient}.
     * out of the box by {@link org.elasticsearch.client.http.client.HttpClient}.
     * Exercises the test http server ability to send back whatever body it received.
     */
    public void testGetWithBody() throws IOException {

@ -19,34 +19,34 @@

package org.elasticsearch.client;

import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpEntityEnclosingRequest;
import org.apache.http.HttpHost;
import org.apache.http.HttpRequest;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.StatusLine;
import org.apache.http.client.methods.HttpHead;
import org.apache.http.client.methods.HttpOptions;
import org.apache.http.client.methods.HttpPatch;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.client.methods.HttpPut;
import org.apache.http.client.methods.HttpTrace;
import org.apache.http.client.methods.HttpUriRequest;
import org.apache.http.client.protocol.HttpClientContext;
import org.apache.http.client.utils.URIBuilder;
import org.apache.http.concurrent.FutureCallback;
import org.apache.http.conn.ConnectTimeoutException;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.auth.BasicScheme;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicStatusLine;
import org.apache.http.nio.protocol.HttpAsyncRequestProducer;
import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.http.Header;
import org.elasticsearch.client.http.HttpEntity;
import org.elasticsearch.client.http.HttpEntityEnclosingRequest;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.http.HttpRequest;
import org.elasticsearch.client.http.HttpResponse;
import org.elasticsearch.client.http.ProtocolVersion;
import org.elasticsearch.client.http.StatusLine;
import org.elasticsearch.client.http.client.methods.HttpHead;
import org.elasticsearch.client.http.client.methods.HttpOptions;
import org.elasticsearch.client.http.client.methods.HttpPatch;
import org.elasticsearch.client.http.client.methods.HttpPost;
import org.elasticsearch.client.http.client.methods.HttpPut;
import org.elasticsearch.client.http.client.methods.HttpTrace;
import org.elasticsearch.client.http.client.methods.HttpUriRequest;
import org.elasticsearch.client.http.client.protocol.HttpClientContext;
import org.elasticsearch.client.http.client.utils.URIBuilder;
import org.elasticsearch.client.http.concurrent.FutureCallback;
import org.elasticsearch.client.http.conn.ConnectTimeoutException;
import org.elasticsearch.client.http.entity.ContentType;
import org.elasticsearch.client.http.entity.StringEntity;
import org.elasticsearch.client.http.impl.auth.BasicScheme;
import org.elasticsearch.client.http.impl.nio.client.CloseableHttpAsyncClient;
import org.elasticsearch.client.http.message.BasicHttpResponse;
import org.elasticsearch.client.http.message.BasicStatusLine;
import org.elasticsearch.client.http.nio.protocol.HttpAsyncRequestProducer;
import org.elasticsearch.client.http.nio.protocol.HttpAsyncResponseConsumer;
import org.elasticsearch.client.http.util.EntityUtils;
import org.junit.Before;
import org.mockito.ArgumentCaptor;
import org.mockito.invocation.InvocationOnMock;

@ -19,9 +19,9 @@

package org.elasticsearch.client;

import org.apache.http.Header;
import org.apache.http.HttpHost;
import org.apache.http.impl.nio.client.CloseableHttpAsyncClient;
import org.elasticsearch.client.http.Header;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.http.impl.nio.client.CloseableHttpAsyncClient;

import java.net.URI;
import java.util.Collections;

@ -19,14 +19,14 @@

package org.elasticsearch.client;

import org.apache.http.HttpHost;
import org.apache.http.HttpResponse;
import org.apache.http.ProtocolVersion;
import org.apache.http.RequestLine;
import org.apache.http.StatusLine;
import org.apache.http.message.BasicHttpResponse;
import org.apache.http.message.BasicRequestLine;
import org.apache.http.message.BasicStatusLine;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.http.HttpResponse;
import org.elasticsearch.client.http.ProtocolVersion;
import org.elasticsearch.client.http.RequestLine;
import org.elasticsearch.client.http.StatusLine;
import org.elasticsearch.client.http.message.BasicHttpResponse;
import org.elasticsearch.client.http.message.BasicRequestLine;
import org.elasticsearch.client.http.message.BasicStatusLine;

import java.io.IOException;
import java.net.URISyntaxException;

@ -19,21 +19,21 @@

package org.elasticsearch.client.documentation;

import org.apache.http.Header;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.apache.http.RequestLine;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.entity.ContentType;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.apache.http.impl.nio.reactor.IOReactorConfig;
import org.apache.http.message.BasicHeader;
import org.apache.http.nio.entity.NStringEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.http.Header;
import org.elasticsearch.client.http.HttpEntity;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.http.RequestLine;
import org.elasticsearch.client.http.auth.AuthScope;
import org.elasticsearch.client.http.auth.UsernamePasswordCredentials;
import org.elasticsearch.client.http.client.CredentialsProvider;
import org.elasticsearch.client.http.client.config.RequestConfig;
import org.elasticsearch.client.http.entity.ContentType;
import org.elasticsearch.client.http.impl.client.BasicCredentialsProvider;
import org.elasticsearch.client.http.impl.nio.client.HttpAsyncClientBuilder;
import org.elasticsearch.client.http.impl.nio.reactor.IOReactorConfig;
import org.elasticsearch.client.http.message.BasicHeader;
import org.elasticsearch.client.http.nio.entity.NStringEntity;
import org.elasticsearch.client.http.util.EntityUtils;
import org.elasticsearch.client.HttpAsyncResponseConsumerFactory;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseListener;
BIN
client/rest/src/test/resources/testks.jks
Normal file
Binary file not shown.
@ -40,8 +40,6 @@ publishing {

dependencies {
    compile "org.elasticsearch.client:elasticsearch-rest-client:${version}"
    compile "org.apache.httpcomponents:httpclient:${versions.httpclient}"
    compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"
    compile "commons-codec:commons-codec:${versions.commonscodec}"
    compile "commons-logging:commons-logging:${versions.commonslogging}"
    compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"

@ -1 +0,0 @@
733db77aa8d9b2d68015189df76ab06304406e50
@ -1,558 +0,0 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

=========================================================================

This project includes Public Suffix List copied from
<https://publicsuffix.org/list/effective_tld_names.dat>
licensed under the terms of the Mozilla Public License, v. 2.0

Full license text: <http://mozilla.org/MPL/2.0/>

Mozilla Public License Version 2.0
==================================

1. Definitions
--------------

1.1. "Contributor"
    means each individual or legal entity that creates, contributes to
    the creation of, or owns Covered Software.

1.2. "Contributor Version"
    means the combination of the Contributions of others (if any) used
    by a Contributor and that particular Contributor's Contribution.

1.3. "Contribution"
    means Covered Software of a particular Contributor.

1.4. "Covered Software"
    means Source Code Form to which the initial Contributor has attached
    the notice in Exhibit A, the Executable Form of such Source Code
    Form, and Modifications of such Source Code Form, in each case
    including portions thereof.

1.5. "Incompatible With Secondary Licenses"
    means

    (a) that the initial Contributor has attached the notice described
        in Exhibit B to the Covered Software; or

    (b) that the Covered Software was made available under the terms of
        version 1.1 or earlier of the License, but not also under the
        terms of a Secondary License.

1.6. "Executable Form"
    means any form of the work other than Source Code Form.

1.7. "Larger Work"
    means a work that combines Covered Software with other material, in
    a separate file or files, that is not Covered Software.

1.8. "License"
    means this document.

1.9. "Licensable"
    means having the right to grant, to the maximum extent possible,
    whether at the time of the initial grant or subsequently, any and
    all of the rights conveyed by this License.

1.10. "Modifications"
    means any of the following:

    (a) any file in Source Code Form that results from an addition to,
        deletion from, or modification of the contents of Covered
        Software; or

    (b) any new file in Source Code Form that contains any Covered
        Software.

1.11. "Patent Claims" of a Contributor
    means any patent claim(s), including without limitation, method,
    process, and apparatus claims, in any patent Licensable by such
    Contributor that would be infringed, but for the grant of the
    License, by the making, using, selling, offering for sale, having
    made, import, or transfer of either its Contributions or its
    Contributor Version.

1.12. "Secondary License"
    means either the GNU General Public License, Version 2.0, the GNU
    Lesser General Public License, Version 2.1, the GNU Affero General
    Public License, Version 3.0, or any later versions of those
    licenses.

1.13. "Source Code Form"
    means the form of the work preferred for making modifications.

1.14. "You" (or "Your")
    means an individual or a legal entity exercising rights under this
    License. For legal entities, "You" includes any entity that
    controls, is controlled by, or is under common control with You. For
    purposes of this definition, "control" means (a) the power, direct
    or indirect, to cause the direction or management of such entity,
    whether by contract or otherwise, or (b) ownership of more than
    fifty percent (50%) of the outstanding shares or beneficial
    ownership of such entity.

2. License Grants and Conditions
--------------------------------

2.1. Grants
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
(a) under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
(b) under Patent Claims of such Contributor to make, use, sell, offer
|
||||
for sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
(a) for any code that a Contributor has removed from Covered Software;
|
||||
or
|
||||
|
||||
(b) for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
(c) under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights
|
||||
to grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
|
||||
in Section 2.1.
|
||||
|
||||
3. Responsibilities
|
||||
-------------------
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
(a) such Covered Software must also be made available in Source Code
|
||||
Form, as described in Section 3.1, and You must inform recipients of
|
||||
the Executable Form how they can obtain a copy of such Source Code
|
||||
Form by reasonable means in a timely manner, at a charge no more
|
||||
than the cost of distribution to the recipient; and
|
||||
|
||||
(b) You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter
|
||||
the recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty,
|
||||
or limitations of liability) contained within the Source Code Form of
|
||||
the Covered Software, except that You may alter any license notices to
|
||||
the extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
---------------------------------------------------
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this
|
||||
License with respect to some or all of the Covered Software due to
|
||||
statute, judicial order, or regulation then You must: (a) comply with
|
||||
the terms of this License to the maximum extent possible; and (b)
|
||||
describe the limitations and the code they affect. Such description must
|
||||
be placed in a text file included with all distributions of the Covered
|
||||
Software under this License. Except to the extent prohibited by statute
|
||||
or regulation, such description must be sufficiently detailed for a
|
||||
recipient of ordinary skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
--------------
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically
|
||||
if You fail to comply with any of its terms. However, if You become
|
||||
compliant, then the rights granted under this License from a particular
|
||||
Contributor are reinstated (a) provisionally, unless and until such
|
||||
Contributor explicitly and finally terminates Your grants, and (b) on an
|
||||
ongoing basis, if such Contributor fails to notify You of the
|
||||
non-compliance by some reasonable means prior to 60 days after You have
|
||||
come back into compliance. Moreover, Your grants from a particular
|
||||
Contributor are reinstated on an ongoing basis if such Contributor
|
||||
notifies You of the non-compliance by some reasonable means, this is the
|
||||
first time You have received notice of non-compliance with this License
|
||||
from such Contributor, and You become compliant prior to 30 days after
|
||||
Your receipt of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
|
||||
end user license agreements (excluding distributors and resellers) which
|
||||
have been validly granted by You or Your distributors under this License
|
||||
prior to termination shall survive termination.
|
||||
|
||||
************************************************************************
*                                                                      *
*  6. Disclaimer of Warranty                                           *
*  -------------------------                                           *
*                                                                      *
*  Covered Software is provided under this License on an "as is"       *
*  basis, without warranty of any kind, either expressed, implied, or  *
*  statutory, including, without limitation, warranties that the       *
*  Covered Software is free of defects, merchantable, fit for a        *
*  particular purpose or non-infringing. The entire risk as to the     *
*  quality and performance of the Covered Software is with You.        *
*  Should any Covered Software prove defective in any respect, You     *
*  (not any Contributor) assume the cost of any necessary servicing,   *
*  repair, or correction. This disclaimer of warranty constitutes an   *
*  essential part of this License. No use of any Covered Software is   *
*  authorized under this License except under this disclaimer.         *
*                                                                      *
************************************************************************

************************************************************************
*                                                                      *
*  7. Limitation of Liability                                          *
*  --------------------------                                          *
*                                                                      *
*  Under no circumstances and under no legal theory, whether tort      *
*  (including negligence), contract, or otherwise, shall any           *
*  Contributor, or anyone who distributes Covered Software as          *
*  permitted above, be liable to You for any direct, indirect,         *
*  special, incidental, or consequential damages of any character      *
*  including, without limitation, damages for lost profits, loss of    *
*  goodwill, work stoppage, computer failure or malfunction, or any    *
*  and all other commercial damages or losses, even if such party      *
*  shall have been informed of the possibility of such damages. This   *
*  limitation of liability shall not apply to liability for death or   *
*  personal injury resulting from such party's negligence to the       *
*  extent applicable law prohibits such limitation. Some               *
*  jurisdictions do not allow the exclusion or limitation of           *
*  incidental or consequential damages, so this exclusion and          *
*  limitation may not apply to You.                                    *
*                                                                      *
************************************************************************

8. Litigation
-------------

Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.

9. Miscellaneous
----------------

This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.

10. Versions of the License
---------------------------

10.1. New Versions

Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.

10.2. Effect of New Versions

You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.

10.3. Modified Versions

If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses

If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.

Exhibit A - Source Code Form License Notice
-------------------------------------------

This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------

This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.
@ -1,6 +0,0 @@
Apache HttpComponents Client
Copyright 1999-2016 The Apache Software Foundation

This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

@ -1 +0,0 @@
e7501a1b34325abb00d17dde96150604a0658b54

@ -24,8 +24,8 @@ import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.HttpEntity;
import org.apache.http.HttpHost;
import org.elasticsearch.client.http.HttpEntity;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

@ -19,7 +19,7 @@

package org.elasticsearch.client.sniff;

import org.apache.http.HttpHost;
import org.elasticsearch.client.http.HttpHost;

import java.io.IOException;
import java.util.List;

@ -19,7 +19,7 @@

package org.elasticsearch.client.sniff;

import org.apache.http.HttpHost;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.RestClient;

import java.util.Objects;

@ -21,7 +21,7 @@ package org.elasticsearch.client.sniff;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.http.HttpHost;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;

@ -27,9 +27,9 @@ import com.fasterxml.jackson.core.JsonGenerator;
import com.sun.net.httpserver.HttpExchange;
import com.sun.net.httpserver.HttpHandler;
import com.sun.net.httpserver.HttpServer;
import org.apache.http.Consts;
import org.apache.http.HttpHost;
import org.apache.http.client.methods.HttpGet;
import org.elasticsearch.client.http.Consts;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.http.client.methods.HttpGet;
import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;

@ -19,7 +19,7 @@

package org.elasticsearch.client.sniff;

import org.apache.http.HttpHost;
import org.elasticsearch.client.http.HttpHost;

import java.io.IOException;
import java.util.Collections;

@ -19,7 +19,7 @@

package org.elasticsearch.client.sniff;

import org.apache.http.HttpHost;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientTestCase;

@ -20,7 +20,7 @@
package org.elasticsearch.client.sniff;

import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
import org.apache.http.HttpHost;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientTestCase;

@ -19,7 +19,7 @@

package org.elasticsearch.client.sniff.documentation;

import org.apache.http.HttpHost;
import org.elasticsearch.client.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.sniff.ElasticsearchHostsSniffer;
import org.elasticsearch.client.sniff.HostsSniffer;

@ -18,7 +18,6 @@
 */

import org.elasticsearch.gradle.precommit.PrecommitTasks
import org.gradle.api.JavaVersion

apply plugin: 'elasticsearch.build'
apply plugin: 'ru.vyarus.animalsniffer'
@ -27,7 +26,7 @@ targetCompatibility = JavaVersion.VERSION_1_7
sourceCompatibility = JavaVersion.VERSION_1_7

dependencies {
    compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"
    compile "org.elasticsearch.client:elasticsearch-rest-client:${version}"
    compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
    compile "junit:junit:${versions.junit}"
    compile "org.hamcrest:hamcrest-all:${versions.hamcrest}"

@ -30,7 +30,7 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakLingering;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope;
import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies;
import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite;
import org.apache.http.Header;
import org.elasticsearch.client.http.Header;

import java.util.ArrayList;
import java.util.HashMap;

@ -22,8 +22,8 @@ package org.elasticsearch.client;
import com.carrotsearch.randomizedtesting.generators.RandomNumbers;
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import com.carrotsearch.randomizedtesting.generators.RandomStrings;
import org.apache.http.Header;
import org.apache.http.message.BasicHeader;
import org.elasticsearch.client.http.Header;
import org.elasticsearch.client.http.message.BasicHeader;

import java.util.ArrayList;
import java.util.Arrays;

@ -258,6 +258,10 @@ thirdPartyAudit.excludes = [
    'org.noggit.JSONParser',
]

if (JavaVersion.current() > JavaVersion.VERSION_1_8) {
    thirdPartyAudit.excludes += ['javax.xml.bind.DatatypeConverter']
}

dependencyLicenses {
    mapping from: /lucene-.*/, to: 'lucene'
    mapping from: /jackson-.*/, to: 'jackson'

@ -0,0 +1,71 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.lucene.document;

import org.apache.lucene.search.Query;
import org.apache.lucene.util.BytesRef;

/**
 * A range field for binary encoded ranges
 */
public final class BinaryRange extends Field {
    /** The number of bytes per dimension, use {@link InetAddressPoint#BYTES} as max, because that is the maximum we need to support */
    public static final int BYTES = InetAddressPoint.BYTES;

    private static final FieldType TYPE;
    static {
        TYPE = new FieldType();
        TYPE.setDimensions(2, BYTES);
        TYPE.freeze();
    }

    /**
     * Create a new BinaryRange from a provided encoded binary range
     * @param name          field name. must not be null.
     * @param encodedRange  Encoded range
     */
    public BinaryRange(String name, byte[] encodedRange) {
        super(name, TYPE);
        if (encodedRange.length != BYTES * 2) {
            throw new IllegalArgumentException("Unexpected encoded range length [" + encodedRange.length + "]");
        }
        fieldsData = new BytesRef(encodedRange);
    }

    /**
     * Create a query for matching indexed ip ranges that {@code INTERSECT} the defined range.
     * @param field         field name. must not be null.
     * @param encodedRange  Encoded range
     * @return query for matching intersecting encoded ranges (overlap, within, crosses, or contains)
     * @throws IllegalArgumentException if {@code field} is null, {@code min} or {@code max} is invalid
     */
    public static Query newIntersectsQuery(String field, byte[] encodedRange) {
        return newRelationQuery(field, encodedRange, RangeFieldQuery.QueryType.INTERSECTS);
    }

    static Query newRelationQuery(String field, byte[] encodedRange, RangeFieldQuery.QueryType relation) {
        return new RangeFieldQuery(field, encodedRange, 1, relation) {
            @Override
            protected String toString(byte[] ranges, int dimension) {
                return "[" + new BytesRef(ranges, 0, BYTES) + " TO " + new BytesRef(ranges, BYTES, BYTES) + "]";
            }
        };
    }

}

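A minimal usage sketch for the BinaryRange field added above — illustrative only, not part of this commit. The layout of `encoded` (min bytes then max bytes, BinaryRange.BYTES per bound) follows from the constructor's length check; `indexWriter` and `indexSearcher` are assumed to exist:

    // Hypothetical illustration: index one encoded range, then search for intersecting ranges.
    byte[] encoded = new byte[BinaryRange.BYTES * 2]; // [min bytes | max bytes]
    // ... fill 'encoded' with the binary-encoded lower and upper bounds ...
    Document doc = new Document();
    doc.add(new BinaryRange("ip_range", encoded));    // throws if length != BYTES * 2
    indexWriter.addDocument(doc);

    Query query = BinaryRange.newIntersectsQuery("ip_range", encoded);
    TopDocs hits = indexSearcher.search(query, 10);
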
@ -19,6 +19,7 @@

package org.apache.lucene.queries;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.ConstantScoreScorer;
import org.apache.lucene.search.ConstantScoreWeight;
@ -35,16 +36,26 @@ import java.util.Objects;
 * to a configured doc ID. */
public final class MinDocQuery extends Query {

    // Matching documents depend on the sequence of segments that the index reader
    // wraps. Yet matches must be cacheable per-segment, so we need to incorporate
    // the reader id in the identity of the query so that a cache entry may only
    // be reused if this query is run against the same index reader.
    private final Object readerId;
    private final int minDoc;

    /** Sole constructor. */
    public MinDocQuery(int minDoc) {
        this(minDoc, null);
    }

    MinDocQuery(int minDoc, Object readerId) {
        this.minDoc = minDoc;
        this.readerId = readerId;
    }

    @Override
    public int hashCode() {
        return Objects.hash(classHash(), minDoc);
        return Objects.hash(classHash(), minDoc, readerId);
    }

    @Override
@ -53,11 +64,24 @@ public final class MinDocQuery extends Query {
            return false;
        }
        MinDocQuery that = (MinDocQuery) obj;
        return minDoc == that.minDoc;
        return minDoc == that.minDoc && Objects.equals(readerId, that.readerId);
    }

    @Override
    public Query rewrite(IndexReader reader) throws IOException {
        if (Objects.equals(reader.getContext().id(), readerId) == false) {
            return new MinDocQuery(minDoc, reader.getContext().id());
        }
        return this;
    }

    @Override
    public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
        if (readerId == null) {
            throw new IllegalStateException("Rewrite first");
        } else if (Objects.equals(searcher.getIndexReader().getContext().id(), readerId) == false) {
            throw new IllegalStateException("Executing against a different reader than the query has been rewritten against");
        }
        return new ConstantScoreWeight(this, boost) {
            @Override
            public Scorer scorer(LeafReaderContext context) throws IOException {

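An illustrative sketch (not from this commit) of the contract enforced by the readerId check above: a MinDocQuery must be rewritten against the reader it will execute on before a Weight is created, which keys query-cache entries to that single reader:

    IndexSearcher searcher = new IndexSearcher(reader); // 'reader' is an assumed open IndexReader
    Query unrewritten = new MinDocQuery(42);            // readerId is null at this point
    Query rewritten = searcher.rewrite(unrewritten);    // binds the query to reader.getContext().id()
    TopDocs hits = searcher.search(rewritten, 10);      // createWeight on the unrewritten query would throw "Rewrite first"
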
@ -37,7 +37,6 @@ import org.apache.lucene.util.automaton.CharacterRunAutomaton;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lucene.all.AllTermQuery;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;

import java.io.IOException;
@ -62,6 +61,7 @@ public class CustomUnifiedHighlighter extends UnifiedHighlighter {
    public static final char MULTIVAL_SEP_CHAR = (char) 0;
    private static final Snippet[] EMPTY_SNIPPET = new Snippet[0];

    private final OffsetSource offsetSource;
    private final String fieldValue;
    private final PassageFormatter passageFormatter;
    private final BreakIterator breakIterator;
@ -71,24 +71,27 @@ public class CustomUnifiedHighlighter extends UnifiedHighlighter {
    /**
     * Creates a new instance of {@link CustomUnifiedHighlighter}
     *
     * @param analyzer the analyzer used for the field at index time, used for multi term queries internally
     * @param analyzer the analyzer used for the field at index time, used for multi term queries internally.
     * @param passageFormatter our own {@link CustomPassageFormatter}
     *                    which generates snippets in forms of {@link Snippet} objects
     *                    which generates snippets in forms of {@link Snippet} objects.
     * @param offsetSource the {@link OffsetSource} to use for offsets retrieval.
     * @param breakIteratorLocale the {@link Locale} to use for dividing text into passages.
     *                    If null {@link Locale#ROOT} is used
     *                    If null {@link Locale#ROOT} is used.
     * @param breakIterator the {@link BreakIterator} to use for dividing text into passages.
     *                    If null {@link BreakIterator#getSentenceInstance(Locale)} is used.
     * @param fieldValue the original field values delimited by MULTIVAL_SEP_CHAR
     * @param noMatchSize The size of the text that should be returned when no highlighting can be performed
     * @param fieldValue the original field values delimited by MULTIVAL_SEP_CHAR.
     * @param noMatchSize The size of the text that should be returned when no highlighting can be performed.
     */
    public CustomUnifiedHighlighter(IndexSearcher searcher,
                                    Analyzer analyzer,
                                    OffsetSource offsetSource,
                                    PassageFormatter passageFormatter,
                                    @Nullable Locale breakIteratorLocale,
                                    @Nullable BreakIterator breakIterator,
                                    String fieldValue,
                                    int noMatchSize) {
        super(searcher, analyzer);
        this.offsetSource = offsetSource;
        this.breakIterator = breakIterator;
        this.breakIteratorLocale = breakIteratorLocale == null ? Locale.ROOT : breakIteratorLocale;
        this.passageFormatter = passageFormatter;
@ -207,10 +210,20 @@ public class CustomUnifiedHighlighter extends UnifiedHighlighter {
            return Collections.singletonList(new TermQuery(atq.getTerm()));
        } else if (query instanceof FunctionScoreQuery) {
            return Collections.singletonList(((FunctionScoreQuery) query).getSubQuery());
        } else if (query instanceof FiltersFunctionScoreQuery) {
            return Collections.singletonList(((FiltersFunctionScoreQuery) query).getSubQuery());
        } else {
            return null;
        }
    }

    /**
     * Forces the offset source for this highlighter
     */
    @Override
    protected OffsetSource getOffsetSource(String field) {
        if (offsetSource == null) {
            return super.getOffsetSource(field);
        }
        return offsetSource;
    }

}

@ -32,7 +32,6 @@ import org.apache.lucene.search.SynonymQuery;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.elasticsearch.common.lucene.search.MultiPhrasePrefixQuery;
import org.elasticsearch.common.lucene.search.function.FiltersFunctionScoreQuery;
import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
import org.elasticsearch.index.search.ESToParentBlockJoinQuery;

@ -69,8 +68,6 @@ public class CustomFieldQuery extends FieldQuery {
            flatten(((FunctionScoreQuery) sourceQuery).getSubQuery(), reader, flatQueries, boost);
        } else if (sourceQuery instanceof MultiPhrasePrefixQuery) {
            flatten(sourceQuery.rewrite(reader), reader, flatQueries, boost);
        } else if (sourceQuery instanceof FiltersFunctionScoreQuery) {
            flatten(((FiltersFunctionScoreQuery) sourceQuery).getSubQuery(), reader, flatQueries, boost);
        } else if (sourceQuery instanceof MultiPhraseQuery) {
            MultiPhraseQuery q = ((MultiPhraseQuery) sourceQuery);
            convertMultiPhraseQuery(0, new int[q.getTermArrays().length], q, q.getTermArrays(), q.getPositions(), reader, flatQueries);

@ -86,6 +86,8 @@ public class Version implements Comparable<Version> {
    public static final Version V_5_5_0 = new Version(V_5_5_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0);
    public static final int V_5_5_1_ID = 5050199;
    public static final Version V_5_5_1 = new Version(V_5_5_1_ID, org.apache.lucene.util.Version.LUCENE_6_6_0);
    public static final int V_5_5_2_ID = 5050299;
    public static final Version V_5_5_2 = new Version(V_5_5_2_ID, org.apache.lucene.util.Version.LUCENE_6_6_0);
    public static final int V_5_6_0_ID = 5060099;
    public static final Version V_5_6_0 = new Version(V_5_6_0_ID, org.apache.lucene.util.Version.LUCENE_6_6_0);
    public static final int V_6_0_0_alpha1_ID = 6000001;
@ -97,7 +99,13 @@ public class Version implements Comparable<Version> {
    public static final int V_6_0_0_beta1_ID = 6000026;
    public static final Version V_6_0_0_beta1 =
        new Version(V_6_0_0_beta1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
    public static final Version CURRENT = V_6_0_0_beta1;
    public static final int V_6_1_0_ID = 6010099;
    public static final Version V_6_1_0 =
        new Version(V_6_1_0_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
    public static final int V_7_0_0_alpha1_ID = 7000001;
    public static final Version V_7_0_0_alpha1 =
        new Version(V_7_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);
    public static final Version CURRENT = V_7_0_0_alpha1;

    // unreleased versions must be added to the above list with the suffix _UNRELEASED (with the exception of CURRENT)

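A hypothetical helper (not in the commit) spelling out the numeric ID scheme the constants above follow; the examples in the comment come from the IDs shown:

    // id = major * 1_000_000 + minor * 10_000 + revision * 100 + build,
    // where build 99 marks a GA release and lower values mark prereleases
    // (5050299 -> 5.5.2, 6000026 -> 6.0.0-beta1, 7000001 -> 7.0.0-alpha1).
    static int versionId(int major, int minor, int revision, int build) {
        return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
    }
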
@ -112,6 +120,10 @@ public class Version implements Comparable<Version> {

    public static Version fromId(int id) {
        switch (id) {
            case V_7_0_0_alpha1_ID:
                return V_7_0_0_alpha1;
            case V_6_1_0_ID:
                return V_6_1_0;
            case V_6_0_0_beta1_ID:
                return V_6_0_0_beta1;
            case V_6_0_0_alpha2_ID:
@ -120,6 +132,8 @@ public class Version implements Comparable<Version> {
                return V_6_0_0_alpha1;
            case V_5_6_0_ID:
                return V_5_6_0;
            case V_5_5_2_ID:
                return V_5_5_2;
            case V_5_5_1_ID:
                return V_5_5_1;
            case V_5_5_0_ID:
@ -307,12 +321,12 @@ public class Version implements Comparable<Version> {
    public Version minimumCompatibilityVersion() {
        final int bwcMajor;
        final int bwcMinor;
        // TODO: remove this entirely, making it static for each version
        if (major == 6) { // we only specialize for current major here
            bwcMajor = Version.V_5_5_0.major;
            bwcMinor = Version.V_5_5_0.minor;
        } else if (major > 6) { // all the future versions are compatible with first minor...
            bwcMajor = major - 1;
            bwcMinor = 0;
            bwcMajor = Version.V_5_6_0.major;
            bwcMinor = Version.V_5_6_0.minor;
        } else if (major == 7) { // we only specialize for current major here
            return V_6_1_0;
        } else {
            bwcMajor = major;
            bwcMinor = 0;
@ -329,6 +343,8 @@ public class Version implements Comparable<Version> {
        final int bwcMajor;
        if (major == 5) {
            bwcMajor = 2; // we jumped from 2 to 5
        } else if (major == 7) {
            return V_6_0_0_beta1;
        } else {
            bwcMajor = major - 1;
        }
@ -378,6 +394,10 @@ public class Version implements Comparable<Version> {
        return sb.toString();
    }

    public static String displayVersion(final Version version, final boolean isSnapshot) {
        return version + (isSnapshot ? "-SNAPSHOT" : "");
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {

@ -24,7 +24,6 @@ import org.elasticsearch.common.CheckedConsumer;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

/**

@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.cluster.storedscripts;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.io.stream.StreamInput;
@ -31,17 +32,15 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
public class DeleteStoredScriptRequest extends AcknowledgedRequest<DeleteStoredScriptRequest> {

    private String id;
    private String lang;

    DeleteStoredScriptRequest() {
        super();
    }

    public DeleteStoredScriptRequest(String id, String lang) {
    public DeleteStoredScriptRequest(String id) {
        super();

        this.id = id;
        this.lang = lang;
    }

    @Override
@ -54,10 +53,6 @@ public class DeleteStoredScriptRequest extends AcknowledgedRequest<DeleteStoredS
            validationException = addValidationError("id cannot contain '#' for stored script", validationException);
        }

        if (lang != null && lang.contains("#")) {
            validationException = addValidationError("lang cannot contain '#' for stored script", validationException);
        }

        return validationException;
    }

@ -71,24 +66,12 @@ public class DeleteStoredScriptRequest extends AcknowledgedRequest<DeleteStoredS
        return this;
    }

    public String lang() {
        return lang;
    }

    public DeleteStoredScriptRequest lang(String lang) {
        this.lang = lang;

        return this;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);

        lang = in.readString();

        if (lang.isEmpty()) {
            lang = null;
        if (in.getVersion().before(Version.V_6_0_0_alpha2)) {
            in.readString(); // read lang from previous versions
        }

        id = in.readString();
@ -98,12 +81,15 @@ public class DeleteStoredScriptRequest extends AcknowledgedRequest<DeleteStoredS
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);

        out.writeString(lang == null ? "" : lang);
        if (out.getVersion().before(Version.V_6_0_0_alpha2)) {
            out.writeString(""); // write an empty lang to previous versions
        }

        out.writeString(id);
    }

    @Override
    public String toString() {
        return "delete stored script {id [" + id + "]" + (lang != null ? ", lang [" + lang + "]" : "") + "}";
        return "delete stored script {id [" + id + "]}";
    }
}

@ -29,12 +29,6 @@ public class DeleteStoredScriptRequestBuilder extends AcknowledgedRequestBuilder
        super(client, action, new DeleteStoredScriptRequest());
    }

    public DeleteStoredScriptRequestBuilder setLang(String lang) {
        request.lang(lang);

        return this;
    }

    public DeleteStoredScriptRequestBuilder setId(String id) {
        request.id(id);

@ -19,10 +19,9 @@

package org.elasticsearch.action.admin.cluster.storedscripts;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ValidateActions;
import org.elasticsearch.action.support.master.MasterNodeReadRequest;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@ -33,17 +32,15 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
public class GetStoredScriptRequest extends MasterNodeReadRequest<GetStoredScriptRequest> {

    protected String id;
    protected String lang;

    GetStoredScriptRequest() {
        super();
    }

    public GetStoredScriptRequest(String id, String lang) {
    public GetStoredScriptRequest(String id) {
        super();

        this.id = id;
        this.lang = lang;
    }

    @Override
@ -56,10 +53,6 @@ public class GetStoredScriptRequest extends MasterNodeReadRequest<GetStoredScrip
            validationException = addValidationError("id cannot contain '#' for stored script", validationException);
        }

        if (lang != null && lang.contains("#")) {
            validationException = addValidationError("lang cannot contain '#' for stored script", validationException);
        }

        return validationException;
    }

@ -73,24 +66,12 @@ public class GetStoredScriptRequest extends MasterNodeReadRequest<GetStoredScrip
        return this;
    }

    public String lang() {
        return lang;
    }

    public GetStoredScriptRequest lang(String lang) {
        this.lang = lang;

        return this;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);

        lang = in.readString();

        if (lang.isEmpty()) {
            lang = null;
        if (in.getVersion().before(Version.V_6_0_0_alpha2)) {
            in.readString(); // read lang from previous versions
        }

        id = in.readString();
@ -100,12 +81,15 @@ public class GetStoredScriptRequest extends MasterNodeReadRequest<GetStoredScrip
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);

        out.writeString(lang == null ? "" : lang);
        if (out.getVersion().before(Version.V_6_0_0_alpha2)) {
            out.writeString(""); // write an empty lang to previous versions
        }

        out.writeString(id);
    }

    @Override
    public String toString() {
        return "get script [" + lang + "][" + id + "]";
        return "get script [" + id + "]";
    }
}

@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.cluster.storedscripts;

import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Nullable;

public class GetStoredScriptRequestBuilder extends MasterNodeReadOperationRequestBuilder<GetStoredScriptRequest,
        GetStoredScriptResponse, GetStoredScriptRequestBuilder> {
@ -31,11 +30,6 @@ public class GetStoredScriptRequestBuilder extends MasterNodeReadOperationReques
        super(client, action, new GetStoredScriptRequest());
    }

    public GetStoredScriptRequestBuilder setLang(@Nullable String lang) {
        request.lang(lang);
        return this;
    }

    public GetStoredScriptRequestBuilder setId(String id) {
        request.id(id);
        return this;

@ -28,6 +28,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.script.StoredScriptSource;

import java.io.IOException;
import java.util.Objects;
@ -37,22 +38,22 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptRequest> {

    private String id;
    private String lang;
    private String context;
    private BytesReference content;
    private XContentType xContentType;
    private StoredScriptSource source;

    public PutStoredScriptRequest() {
        super();
    }

    public PutStoredScriptRequest(String id, String lang, String context, BytesReference content, XContentType xContentType) {
    public PutStoredScriptRequest(String id, String context, BytesReference content, XContentType xContentType, StoredScriptSource source) {
        super();
        this.id = id;
        this.lang = lang;
        this.context = context;
        this.content = content;
        this.xContentType = Objects.requireNonNull(xContentType);
        this.source = source;
    }

    @Override
@ -65,10 +66,6 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
            validationException = addValidationError("id cannot contain '#' for stored script", validationException);
        }

        if (lang != null && lang.contains("#")) {
            validationException = addValidationError("lang cannot contain '#' for stored script", validationException);
        }

        if (content == null) {
            validationException = addValidationError("must specify code for stored script", validationException);
        }
@ -82,17 +79,6 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR

    public PutStoredScriptRequest id(String id) {
        this.id = id;

        return this;
    }

    public String lang() {
        return lang;
    }

    public PutStoredScriptRequest lang(String lang) {
        this.lang = lang;

        return this;
    }

@ -113,12 +99,17 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
        return xContentType;
    }

    public StoredScriptSource source() {
        return source;
    }

    /**
     * Set the script source and the content type of the bytes.
     */
    public PutStoredScriptRequest content(BytesReference content, XContentType xContentType) {
        this.content = content;
        this.xContentType = Objects.requireNonNull(xContentType);
        this.source = StoredScriptSource.parse(content, xContentType);
        return this;
    }

@ -126,12 +117,9 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);

        lang = in.readString();

        if (lang.isEmpty()) {
            lang = null;
        if (in.getVersion().before(Version.V_6_0_0_alpha2)) {
            in.readString(); // read lang from previous versions
        }

        id = in.readOptionalString();
        content = in.readBytesReference();
        if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
@ -141,6 +129,9 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
        }
        if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) {
            context = in.readOptionalString();
            source = new StoredScriptSource(in);
        } else {
            source = StoredScriptSource.parse(content, xContentType == null ? XContentType.JSON : xContentType);
        }
    }

@ -148,7 +139,9 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);

        out.writeString(lang == null ? "" : lang);
        if (out.getVersion().before(Version.V_6_0_0_alpha2)) {
            out.writeString(source == null ? "" : source.getLang());
        }
        out.writeOptionalString(id);
        out.writeBytesReference(content);
        if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
@ -156,6 +149,7 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
        }
        if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha2)) {
            out.writeOptionalString(context);
            source.writeTo(out);
        }
    }

@ -169,6 +163,8 @@ public class PutStoredScriptRequest extends AcknowledgedRequest<PutStoredScriptR
            // ignore
        }

        return "put stored script {id [" + id + "]" + (lang != null ? ", lang [" + lang + "]" : "") + ", content [" + source + "]}";
        return "put stored script {id [" + id + "]" +
            (context != null ? ", context [" + context + "]" : "") +
            ", content [" + source + "]}";
    }
}

@ -43,9 +43,4 @@ public class PutStoredScriptRequestBuilder extends AcknowledgedRequestBuilder<Pu
        request.content(source, xContentType);
        return this;
    }

    public PutStoredScriptRequestBuilder setLang(String lang) {
        request.lang(lang);
        return this;
    }
}

@ -19,7 +19,9 @@

package org.elasticsearch.action.admin.indices.cache.clear;

import org.elasticsearch.Version;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

@ -29,10 +31,9 @@ public class ClearIndicesCacheRequest extends BroadcastRequest<ClearIndicesCache

    private boolean queryCache = false;
    private boolean fieldDataCache = false;
    private boolean recycler = false;
    private boolean requestCache = false;
    private String[] fields = null;

    private String[] fields = Strings.EMPTY_ARRAY;


    public ClearIndicesCacheRequest() {
    }
@ -69,7 +70,7 @@ public class ClearIndicesCacheRequest extends BroadcastRequest<ClearIndicesCache
    }

    public ClearIndicesCacheRequest fields(String... fields) {
        this.fields = fields;
        this.fields = fields == null ? Strings.EMPTY_ARRAY : fields;
        return this;
    }

@ -77,21 +78,14 @@ public class ClearIndicesCacheRequest extends BroadcastRequest<ClearIndicesCache
        return this.fields;
    }

    public ClearIndicesCacheRequest recycler(boolean recycler) {
        this.recycler = recycler;
        return this;
    }

    public boolean recycler() {
        return this.recycler;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        queryCache = in.readBoolean();
        fieldDataCache = in.readBoolean();
        recycler = in.readBoolean();
        if (in.getVersion().before(Version.V_6_0_0_beta1)) {
            in.readBoolean(); // recycler
        }
        fields = in.readStringArray();
        requestCache = in.readBoolean();
    }
@ -101,7 +95,9 @@ public class ClearIndicesCacheRequest extends BroadcastRequest<ClearIndicesCache
        super.writeTo(out);
        out.writeBoolean(queryCache);
        out.writeBoolean(fieldDataCache);
        out.writeBoolean(recycler);
        if (out.getVersion().before(Version.V_6_0_0_beta1)) {
            out.writeBoolean(false); // recycler
        }
        out.writeStringArrayNullable(fields);
        out.writeBoolean(requestCache);
    }

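The readFrom/writeTo changes above are an instance of the usual wire-compatibility pattern for a removed field: newer nodes keep consuming and emitting a placeholder when the peer is older. A generic sketch, with VERSION_REMOVED standing in for whichever version dropped the field (illustrative, not from this commit):

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        if (in.getVersion().before(VERSION_REMOVED)) {
            in.readBoolean(); // older peers still send the removed flag; consume and discard it
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        if (out.getVersion().before(VERSION_REMOVED)) {
            out.writeBoolean(false); // older peers still expect the flag; send a harmless default
        }
    }
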
@ -32,8 +32,6 @@ import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
@ -45,7 +43,8 @@ import java.util.List;
|
||||
/**
|
||||
* Indices clear cache action.
|
||||
*/
|
||||
public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAction<ClearIndicesCacheRequest, ClearIndicesCacheResponse, TransportBroadcastByNodeAction.EmptyResult> {
|
||||
public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAction<ClearIndicesCacheRequest, ClearIndicesCacheResponse,
|
||||
TransportBroadcastByNodeAction.EmptyResult> {
|
||||
|
||||
private final IndicesService indicesService;
|
||||
|
||||
@ -53,8 +52,8 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
|
||||
public TransportClearIndicesCacheAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
|
||||
TransportService transportService, IndicesService indicesService, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, ClearIndicesCacheAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
|
||||
ClearIndicesCacheRequest::new, ThreadPool.Names.MANAGEMENT, false);
|
||||
super(settings, ClearIndicesCacheAction.NAME, threadPool, clusterService, transportService, actionFilters,
|
||||
indexNameExpressionResolver, ClearIndicesCacheRequest::new, ThreadPool.Names.MANAGEMENT, false);
|
||||
this.indicesService = indicesService;
|
||||
}
|
||||
|
||||
@ -64,7 +63,9 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
|
||||
}
|
||||
|
||||
@Override
|
||||
protected ClearIndicesCacheResponse newResponse(ClearIndicesCacheRequest request, int totalShards, int successfulShards, int failedShards, List<EmptyResult> responses, List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
|
||||
protected ClearIndicesCacheResponse newResponse(ClearIndicesCacheRequest request, int totalShards, int successfulShards,
|
||||
int failedShards, List<EmptyResult> responses,
|
||||
List<ShardOperationFailedException> shardFailures, ClusterState clusterState) {
|
||||
return new ClearIndicesCacheResponse(totalShards, successfulShards, failedShards, shardFailures);
|
||||
}
|
||||
|
||||
@ -77,46 +78,8 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAc
|
||||
|
||||
@Override
|
||||
protected EmptyResult shardOperation(ClearIndicesCacheRequest request, ShardRouting shardRouting) {
|
||||
IndexService service = indicesService.indexService(shardRouting.index());
|
||||
if (service != null) {
|
||||
IndexShard shard = service.getShardOrNull(shardRouting.id());
|
||||
boolean clearedAtLeastOne = false;
|
||||
if (request.queryCache()) {
|
||||
clearedAtLeastOne = true;
|
||||
service.cache().query().clear("api");
|
||||
}
|
||||
if (request.fieldDataCache()) {
|
||||
clearedAtLeastOne = true;
|
||||
if (request.fields() == null || request.fields().length == 0) {
|
||||
service.fieldData().clear();
|
||||
} else {
|
||||
for (String field : request.fields()) {
|
||||
service.fieldData().clearField(field);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (request.requestCache()) {
|
||||
clearedAtLeastOne = true;
|
||||
indicesService.clearRequestCache(shard);
|
||||
}
|
||||
if (request.recycler()) {
|
||||
logger.debug("Clear CacheRecycler on index [{}]", service.index());
|
||||
clearedAtLeastOne = true;
|
||||
// cacheRecycler.clear();
|
||||
}
|
||||
if (!clearedAtLeastOne) {
|
||||
if (request.fields() != null && request.fields().length > 0) {
|
||||
// only clear caches relating to the specified fields
|
||||
for (String field : request.fields()) {
|
||||
service.fieldData().clearField(field);
|
||||
}
|
||||
} else {
|
||||
service.cache().clear("api");
|
||||
service.fieldData().clear();
|
||||
indicesService.clearRequestCache(shard);
|
||||
}
|
||||
}
|
||||
}
|
||||
indicesService.clearIndexShardCache(shardRouting.shardId(), request.queryCache(), request.fieldDataCache(), request.requestCache(),
|
||||
request.fields());
|
||||
return EmptyResult.INSTANCE;
|
||||
}
|
||||
|
||||
|
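The old hand-rolled branching over each cache type moves behind a single `IndicesService.clearIndexShardCache(...)` call. A rough sketch of that facade idea, with hypothetical names rather than the real Elasticsearch interfaces:

```java
import java.util.List;

// Hypothetical facade: callers pass flags once instead of re-implementing the
// per-cache branching at every call site.
interface ShardCacheFacade {
    void clearQueryCache(String shardId);
    void clearFieldDataCache(String shardId, List<String> fields);
    void clearRequestCache(String shardId);

    default void clear(String shardId, boolean query, boolean fieldData, boolean request, List<String> fields) {
        if (query) {
            clearQueryCache(shardId);
        }
        if (fieldData) {
            clearFieldDataCache(shardId, fields);
        }
        if (request) {
            clearRequestCache(shardId);
        }
    }
}
```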
@@ -464,7 +464,6 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
 
     public static Translog.Location performOnReplica(BulkShardRequest request, IndexShard replica) throws Exception {
         Translog.Location location = null;
-        final long primaryTerm = request.primaryTerm();
         for (int i = 0; i < request.items().length; i++) {
             BulkItemRequest item = request.items()[i];
             final Engine.Result operationResult;
@@ -473,7 +472,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
             switch (replicaItemExecutionMode(item, i)) {
                 case NORMAL:
                     final DocWriteResponse primaryResponse = item.getPrimaryResponse().getResponse();
-                    operationResult = performOpOnReplica(primaryResponse, docWriteRequest, primaryTerm, replica);
+                    operationResult = performOpOnReplica(primaryResponse, docWriteRequest, replica);
                     assert operationResult != null : "operation result must never be null when primary response has no failure";
                     location = syncOperationResultOrThrow(operationResult, location);
                     break;
@@ -482,7 +481,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
                 case FAILURE:
                     final BulkItemResponse.Failure failure = item.getPrimaryResponse().getFailure();
                     assert failure.getSeqNo() != SequenceNumbersService.UNASSIGNED_SEQ_NO : "seq no must be assigned";
-                    operationResult = replica.markSeqNoAsNoop(failure.getSeqNo(), primaryTerm, failure.getMessage());
+                    operationResult = replica.markSeqNoAsNoop(failure.getSeqNo(), failure.getMessage());
                     assert operationResult != null : "operation result must never be null when primary response has no failure";
                     location = syncOperationResultOrThrow(operationResult, location);
                     break;
@@ -501,7 +500,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
     }
 
     private static Engine.Result performOpOnReplica(DocWriteResponse primaryResponse, DocWriteRequest docWriteRequest,
-                                                    long primaryTerm, IndexShard replica) throws Exception {
+                                                    IndexShard replica) throws Exception {
         switch (docWriteRequest.opType()) {
             case CREATE:
             case INDEX:
@@ -511,7 +510,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
                     SourceToParse.source(shardId.getIndexName(),
                         indexRequest.type(), indexRequest.id(), indexRequest.source(), indexRequest.getContentType())
                         .routing(indexRequest.routing()).parent(indexRequest.parent());
-                return replica.applyIndexOperationOnReplica(primaryResponse.getSeqNo(), primaryTerm, primaryResponse.getVersion(),
+                return replica.applyIndexOperationOnReplica(primaryResponse.getSeqNo(), primaryResponse.getVersion(),
                     indexRequest.versionType().versionTypeForReplicationAndRecovery(), indexRequest.getAutoGeneratedTimestamp(),
                     indexRequest.isRetry(), sourceToParse, update -> {
                         throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(),
@@ -519,7 +518,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
                     });
             case DELETE:
                 DeleteRequest deleteRequest = (DeleteRequest) docWriteRequest;
-                return replica.applyDeleteOperationOnReplica(primaryResponse.getSeqNo(), primaryTerm, primaryResponse.getVersion(),
+                return replica.applyDeleteOperationOnReplica(primaryResponse.getSeqNo(), primaryResponse.getVersion(),
                     deleteRequest.type(), deleteRequest.id(), deleteRequest.versionType().versionTypeForReplicationAndRecovery(),
                     update -> {
                         throw new TransportReplicationAction.RetryOnReplicaException(replica.shardId(),
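Across these hunks the explicit `primaryTerm` parameter disappears from the replica-side methods; later hunks in this diff show the term travelling once in the transport envelope instead. An illustrative sketch of the refactoring direction, using invented types:

```java
// Before: every replica-side call threads the term through explicitly.
// After: the term is captured once in an envelope and implied downstream.
final class ShardRequestEnvelope<R> {
    final R request;
    final long primaryTerm; // authoritative term, set when the envelope is built

    ShardRequestEnvelope(R request, long primaryTerm) {
        this.request = request;
        this.primaryTerm = primaryTerm;
    }
}

final class ReplicaExecutor {
    // The executor is constructed per envelope, so individual operations no
    // longer need a primaryTerm parameter of their own.
    private final long primaryTerm;

    ReplicaExecutor(ShardRequestEnvelope<?> envelope) {
        this.primaryTerm = envelope.primaryTerm;
    }

    void applyOperation(String opDescription) {
        System.out.println("applying [" + opDescription + "] under term [" + primaryTerm + "]");
    }
}
```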
@@ -223,26 +223,6 @@ public class DeleteRequest extends ReplicatedWriteRequest<DeleteRequest> impleme
         return "delete {[" + index + "][" + type + "][" + id + "]}";
     }
 
-    /**
-     * Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't
-     * do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or
-     * use because the DeleteRequest object will always be wrapped in a bulk request envelope, which is where this *should* be set.
-     */
-    @Override
-    public long primaryTerm() {
-        throw new UnsupportedOperationException("primary term should never be set on DeleteRequest");
-    }
-
-    /**
-     * Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't
-     * do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or
-     * use because the DeleteRequest object will always be wrapped in a bulk request envelope, which is where this *should* be set.
-     */
-    @Override
-    public void primaryTerm(long term) {
-        throw new UnsupportedOperationException("primary term should never be set on DeleteRequest");
-    }
-
     /**
      * Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't
      * do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or
@@ -542,11 +542,7 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
         pipeline = in.readOptionalString();
         isRetry = in.readBoolean();
         autoGeneratedTimestamp = in.readLong();
-        if (in.getVersion().onOrAfter(Version.V_5_3_0)) {
-            contentType = in.readOptionalWriteable(XContentType::readFrom);
-        } else {
-            contentType = XContentFactory.xContentType(source);
-        }
+        contentType = in.readOptionalWriteable(XContentType::readFrom);
     }
 
     @Override
@@ -565,19 +561,12 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
         }
         out.writeBytesReference(source);
         out.writeByte(opType.getId());
-        // ES versions below 5.1.2 don't know about resolveVersionDefaults but resolve the version eagerly (which messes with validation).
-        if (out.getVersion().before(Version.V_5_1_2)) {
-            out.writeLong(resolveVersionDefaults());
-        } else {
-            out.writeLong(version);
-        }
+        out.writeLong(version);
         out.writeByte(versionType.getValue());
         out.writeOptionalString(pipeline);
         out.writeBoolean(isRetry);
         out.writeLong(autoGeneratedTimestamp);
-        if (out.getVersion().onOrAfter(Version.V_5_3_0)) {
-            out.writeOptionalWriteable(contentType);
-        }
+        out.writeOptionalWriteable(contentType);
     }
 
     @Override
@@ -617,26 +606,6 @@ public class IndexRequest extends ReplicatedWriteRequest<IndexRequest> implement
         return autoGeneratedTimestamp;
     }
 
-    /**
-     * Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't
-     * do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or
-     * use because the IndexRequest object will always be wrapped in a bulk request envelope, which is where this *should* be set.
-     */
-    @Override
-    public long primaryTerm() {
-        throw new UnsupportedOperationException("primary term should never be set on IndexRequest");
-    }
-
-    /**
-     * Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't
-     * do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or
-     * use because the IndexRequest object will always be wrapped in a bulk request envelope, which is where this *should* be set.
-     */
-    @Override
-    public void primaryTerm(long term) {
-        throw new UnsupportedOperationException("primary term should never be set on IndexRequest");
-    }
-
     /**
      * Override this method from ReplicationAction, this is where we are storing our state in the request object (which we really shouldn't
      * do). Once the transport client goes away we can move away from making this available, but in the meantime this is dangerous to set or
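Both removed branches are backward-compatibility gates (pre-5.1.2 version resolution, pre-5.3.0 content-type sniffing) that become dead code once the oldest wire-compatible peer is new enough. A small sketch of collapsing such a gate, with stand-in types and an assumed version floor:

```java
import java.io.DataInputStream;
import java.io.IOException;

// Illustrative reader: once no supported peer is older than the gate version,
// the conditional collapses to the unconditional branch.
final class ContentTypeReader {
    static final int MIN_SUPPORTED_VERSION = 5_03_00_99; // assumed floor >= 5.3.0

    static String readContentType(DataInputStream in, int remoteVersionId) throws IOException {
        // Before: if (remoteVersionId >= V_5_3_0) read it, else sniff it from the source.
        // After: every supported peer writes it, so just read the optional value.
        assert remoteVersionId >= MIN_SUPPORTED_VERSION : "peer too old for this wire format";
        boolean present = in.readBoolean();
        return present ? in.readUTF() : null;
    }
}
```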
@@ -174,16 +174,24 @@ public class SimulatePipelineRequest extends ActionRequest {
     }
 
     private static List<IngestDocument> parseDocs(Map<String, Object> config) {
-        List<Map<String, Object>> docs = ConfigurationUtils.readList(null, null, config, Fields.DOCS);
+        List<Map<String, Object>> docs =
+            ConfigurationUtils.readList(null, null, config, Fields.DOCS);
         List<IngestDocument> ingestDocumentList = new ArrayList<>();
         for (Map<String, Object> dataMap : docs) {
-            Map<String, Object> document = ConfigurationUtils.readMap(null, null, dataMap, Fields.SOURCE);
-            IngestDocument ingestDocument = new IngestDocument(ConfigurationUtils.readStringProperty(null, null, dataMap, MetaData.INDEX.getFieldName(), "_index"),
-                    ConfigurationUtils.readStringProperty(null, null, dataMap, MetaData.TYPE.getFieldName(), "_type"),
-                    ConfigurationUtils.readStringProperty(null, null, dataMap, MetaData.ID.getFieldName(), "_id"),
-                    ConfigurationUtils.readOptionalStringProperty(null, null, dataMap, MetaData.ROUTING.getFieldName()),
-                    ConfigurationUtils.readOptionalStringProperty(null, null, dataMap, MetaData.PARENT.getFieldName()),
-                    document);
+            Map<String, Object> document = ConfigurationUtils.readMap(null, null,
+                dataMap, Fields.SOURCE);
+            String index = ConfigurationUtils.readStringOrIntProperty(null, null,
+                dataMap, MetaData.INDEX.getFieldName(), "_index");
+            String type = ConfigurationUtils.readStringOrIntProperty(null, null,
+                dataMap, MetaData.TYPE.getFieldName(), "_type");
+            String id = ConfigurationUtils.readStringOrIntProperty(null, null,
+                dataMap, MetaData.ID.getFieldName(), "_id");
+            String routing = ConfigurationUtils.readOptionalStringOrIntProperty(null, null,
+                dataMap, MetaData.ROUTING.getFieldName());
+            String parent = ConfigurationUtils.readOptionalStringOrIntProperty(null, null,
+                dataMap, MetaData.PARENT.getFieldName());
+            IngestDocument ingestDocument =
+                new IngestDocument(index, type, id, routing, parent, document);
             ingestDocumentList.add(ingestDocument);
         }
         return ingestDocumentList;
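Switching from `readStringProperty` to `readStringOrIntProperty` makes `_simulate` tolerant of numeric `_index`/`_type`/`_id` values by coercing integers to strings. A self-contained sketch of that coercion (a hypothetical helper, not the real `ConfigurationUtils`):

```java
import java.util.HashMap;
import java.util.Map;

// Sketch of string-or-int coercion: JSON documents often carry numeric ids,
// so metadata fields accept either type and normalize to String.
final class LenientMetadata {
    static String readStringOrInt(Map<String, Object> map, String key, String defaultValue) {
        Object value = map.getOrDefault(key, defaultValue);
        if (value instanceof String) {
            return (String) value;
        }
        if (value instanceof Integer) {
            return String.valueOf(value);
        }
        throw new IllegalArgumentException("[" + key + "] must be a string or an integer, got " + value);
    }

    public static void main(String[] args) {
        Map<String, Object> doc = new HashMap<>();
        doc.put("_id", 42);
        System.out.println(readStringOrInt(doc, "_id", "unknown")); // prints 42
    }
}
```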
@@ -41,7 +41,7 @@ public class MainResponse extends ActionResponse implements ToXContentObject {
     private ClusterName clusterName;
     private String clusterUuid;
     private Build build;
-    private boolean available;
+    boolean available;
 
     MainResponse() {
     }
@@ -113,6 +113,8 @@ public class MainResponse extends ActionResponse implements ToXContentObject {
                 .field("build_date", build.date())
                 .field("build_snapshot", build.isSnapshot())
                 .field("lucene_version", version.luceneVersion.toString())
+                .field("minimum_wire_compatibility_version", version.minimumCompatibilityVersion().toString())
+                .field("minimum_index_compatibility_version", version.minimumIndexCompatibilityVersion().toString())
                 .endObject();
         builder.field("tagline", "You Know, for Search");
         builder.endObject();
@@ -120,7 +122,7 @@ public class MainResponse extends ActionResponse implements ToXContentObject {
     }
 
     private static final ObjectParser<MainResponse, Void> PARSER = new ObjectParser<>(MainResponse.class.getName(), true,
-            () -> new MainResponse());
+            MainResponse::new);
 
     static {
         PARSER.declareString((response, value) -> response.nodeName = value, new ParseField("name"));
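`() -> new MainResponse()` and `MainResponse::new` are equivalent `Supplier`s; the method reference is just the more idiomatic spelling. A tiny illustration:

```java
import java.util.function.Supplier;

final class SupplierStyles {
    static final class Response {
        String name;
    }

    public static void main(String[] args) {
        // Equivalent suppliers; the method reference avoids the lambda noise.
        Supplier<Response> lambda = () -> new Response();
        Supplier<Response> reference = Response::new;
        System.out.println(lambda.get() != reference.get()); // two distinct instances
    }
}
```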
@@ -80,9 +80,9 @@ public class TransportResyncReplicationAction extends TransportWriteAction<Resyn
     }
 
     @Override
-    protected ReplicationOperation.Replicas newReplicasProxy() {
+    protected ReplicationOperation.Replicas newReplicasProxy(long primaryTerm) {
         // We treat the resync as best-effort for now and don't mark unavailable shard copies as stale.
-        return new ReplicasProxy();
+        return new ReplicasProxy(primaryTerm);
     }
 
     @Override
@@ -93,7 +93,7 @@ public class TransportResyncReplicationAction extends TransportWriteAction<Resyn
         if (node.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) {
             super.sendReplicaRequest(replicaRequest, node, listener);
         } else {
-            listener.onResponse(new ReplicaResponse(SequenceNumbersService.UNASSIGNED_SEQ_NO));
+            listener.onResponse(new ReplicaResponse(SequenceNumbersService.PRE_60_NODE_LOCAL_CHECKPOINT));
         }
     }
 
@@ -135,13 +135,13 @@ public class TransportResyncReplicationAction extends TransportWriteAction<Resyn
     }
 
     @Override
-    public void sync(ResyncReplicationRequest request, Task parentTask, String primaryAllocationId,
+    public void sync(ResyncReplicationRequest request, Task parentTask, String primaryAllocationId, long primaryTerm,
                      ActionListener<ResyncReplicationResponse> listener) {
         // skip reroute phase
         transportService.sendChildRequest(
             clusterService.localNode(),
             transportPrimaryAction,
-            new ConcreteShardRequest<>(request, primaryAllocationId),
+            new ConcreteShardRequest<>(request, primaryAllocationId, primaryTerm),
             parentTask,
             transportOptions,
             new TransportResponseHandler<ResyncReplicationResponse>() {
@@ -316,8 +316,8 @@ abstract class AbstractSearchAsyncAction<Result extends SearchPhaseResult> exten
 
         @Override
         protected void skipShard(SearchShardIterator iterator) {
-            super.skipShard(iterator);
             successfulOps.incrementAndGet();
             skippedOps.incrementAndGet();
+            super.skipShard(iterator);
         }
     }
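Bumping `successfulOps` and `skippedOps` before calling `super.skipShard` looks deliberate: if the superclass call can complete the phase and read those counters, incrementing afterwards would expose stale counts. A self-contained sketch of that hazard, with an invented class but the same ordering idea:

```java
import java.util.concurrent.atomic.AtomicInteger;

final class SkipOrdering {
    final AtomicInteger successfulOps = new AtomicInteger();
    final AtomicInteger pending = new AtomicInteger(1);

    // Base-class behavior: finishing the last pending shard reads the counters.
    void baseSkip() {
        if (pending.decrementAndGet() == 0) {
            System.out.println("phase done, successful=" + successfulOps.get());
        }
    }

    void skipWrongOrder() {
        baseSkip();                      // may print successful=0 ...
        successfulOps.incrementAndGet(); // ... because the bump happens too late
    }

    void skipRightOrder() {
        successfulOps.incrementAndGet(); // counters are consistent ...
        baseSkip();                      // ... before anything can read them
    }

    public static void main(String[] args) {
        new SkipOrdering().skipRightOrder(); // prints successful=1
    }
}
```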
@@ -27,6 +27,7 @@ import org.elasticsearch.search.SearchShardTarget;
  * received by this listener.
  */
 abstract class SearchActionListener<T extends SearchPhaseResult> implements ActionListener<T> {
 
     private final int requestIndex;
     private final SearchShardTarget searchShardTarget;
 
@@ -0,0 +1,75 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.action.search;
+
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.node.ResponseCollectorService;
+import org.elasticsearch.search.SearchPhaseResult;
+import org.elasticsearch.search.query.QuerySearchResult;
+import org.elasticsearch.transport.Transport;
+
+import java.util.Objects;
+import java.util.function.BiFunction;
+
+/**
+ * A wrapper of search action listeners (search results) that unwraps the query
+ * result to get the piggybacked queue size and service time EWMA, adding those
+ * values to the coordinating nodes' {@code ResponseCollectorService}.
+ */
+public final class SearchExecutionStatsCollector implements ActionListener<SearchPhaseResult> {
+
+    private final ActionListener<SearchPhaseResult> listener;
+    private final String nodeId;
+    private final ResponseCollectorService collector;
+    private final long startNanos;
+
+    SearchExecutionStatsCollector(ActionListener<SearchPhaseResult> listener,
+                                  ResponseCollectorService collector,
+                                  String nodeId) {
+        this.listener = Objects.requireNonNull(listener, "listener cannot be null");
+        this.collector = Objects.requireNonNull(collector, "response collector cannot be null");
+        this.startNanos = System.nanoTime();
+        this.nodeId = nodeId;
+    }
+
+    public static BiFunction<Transport.Connection, SearchActionListener, ActionListener> makeWrapper(ResponseCollectorService service) {
+        return (connection, originalListener) -> new SearchExecutionStatsCollector(originalListener, service, connection.getNode().getId());
+    }
+
+    @Override
+    public void onResponse(SearchPhaseResult response) {
+        QuerySearchResult queryResult = response.queryResult();
+        if (nodeId != null && queryResult != null) {
+            final long serviceTimeEWMA = queryResult.serviceTimeEWMA();
+            final int queueSize = queryResult.nodeQueueSize();
+            final long responseDuration = System.nanoTime() - startNanos;
+            // EWMA/queue size may be -1 if the query node doesn't support capturing it
+            if (serviceTimeEWMA > 0 && queueSize > 0) {
+                collector.addNodeStatistics(nodeId, queueSize, responseDuration, serviceTimeEWMA);
+            }
+        }
+        listener.onResponse(response);
+    }
+
+    @Override
+    public void onFailure(Exception e) {
+        listener.onFailure(e);
+    }
+}
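The new collector is a listener decorator: it measures response time, reads the remote queue size and service-time EWMA piggybacked on the query result, feeds them to `ResponseCollectorService`, and forwards the response untouched; `makeWrapper` shows how it is attached per connection. A stripped-down sketch of the decorator shape using plain JDK types:

```java
import java.util.function.Consumer;

// Decorator sketch: wrap a response callback, siphon off timing statistics,
// then forward unchanged. Mirrors the collector's onResponse flow.
final class TimingListener<T> implements Consumer<T> {
    private final Consumer<T> delegate;
    private final long startNanos = System.nanoTime();

    TimingListener(Consumer<T> delegate) {
        this.delegate = delegate;
    }

    @Override
    public void accept(T response) {
        long tookNanos = System.nanoTime() - startNanos;
        System.out.println("response after " + tookNanos + "ns"); // stand-in for addNodeStatistics
        delegate.accept(response); // always forward; the stats are best-effort
    }

    public static void main(String[] args) {
        new TimingListener<String>(r -> System.out.println("got " + r)).accept("hit");
    }
}
```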
@@ -406,7 +406,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
         indicesOptions = IndicesOptions.readIndicesOptions(in);
         requestCache = in.readOptionalBoolean();
         batchedReduceSize = in.readVInt();
-        if (in.getVersion().onOrAfter(Version.V_6_0_0_beta1)) {
+        if (in.getVersion().onOrAfter(Version.V_5_6_0)) {
             maxConcurrentShardRequests = in.readVInt();
             preFilterShardSize = in.readVInt();
         }
@@ -428,7 +428,7 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
         indicesOptions.writeIndicesOptions(out);
         out.writeOptionalBoolean(requestCache);
         out.writeVInt(batchedReduceSize);
-        if (out.getVersion().onOrAfter(Version.V_6_0_0_beta1)) {
+        if (out.getVersion().onOrAfter(Version.V_5_6_0)) {
             out.writeVInt(maxConcurrentShardRequests);
             out.writeVInt(preFilterShardSize);
         }

@@ -324,7 +324,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
         }
         scrollId = in.readOptionalString();
         tookInMillis = in.readVLong();
-        if (in.getVersion().onOrAfter(Version.V_6_0_0_beta1)) {
+        if (in.getVersion().onOrAfter(Version.V_5_6_0)) {
             skippedShards = in.readVInt();
         }
     }
@@ -343,7 +343,7 @@ public class SearchResponse extends ActionResponse implements StatusToXContentOb
 
         out.writeOptionalString(scrollId);
         out.writeVLong(tookInMillis);
-        if(out.getVersion().onOrAfter(Version.V_6_0_0_beta1)) {
+        if(out.getVersion().onOrAfter(Version.V_5_6_0)) {
             out.writeVInt(skippedShards);
         }
     }
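Moving the gate from `V_6_0_0_beta1` to `V_5_6_0` only works because reader and writer flip together; a one-sided change would desynchronize the stream. A minimal sketch of such a matched gate with stand-in types:

```java
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Both sides of the wire must agree on the gate version, otherwise the reader
// consumes bytes that were never written (or vice versa).
final class GatedField {
    static final int GATE = 5_06_00_99; // stand-in for V_5_6_0

    static void write(DataOutputStream out, int remoteVersion, int skippedShards) throws IOException {
        if (remoteVersion >= GATE) {
            out.writeInt(skippedShards);
        }
    }

    static int read(DataInputStream in, int remoteVersion) throws IOException {
        return remoteVersion >= GATE ? in.readInt() : 0; // default when absent
    }
}
```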
@@ -56,6 +56,8 @@ import org.elasticsearch.transport.TransportResponse;
 import org.elasticsearch.transport.TransportService;
 
 import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.function.BiFunction;
 import java.util.function.Supplier;
 
 /**
@@ -77,10 +79,13 @@ public class SearchTransportService extends AbstractComponent {
     public static final String QUERY_CAN_MATCH_NAME = "indices:data/read/search[can_match]";
 
     private final TransportService transportService;
+    private final BiFunction<Transport.Connection, SearchActionListener, ActionListener> responseWrapper;
 
-    public SearchTransportService(Settings settings, TransportService transportService) {
+    public SearchTransportService(Settings settings, TransportService transportService,
+                                  BiFunction<Transport.Connection, SearchActionListener, ActionListener> responseWrapper) {
         super(settings);
         this.transportService = transportService;
+        this.responseWrapper = responseWrapper;
     }
 
     public void sendFreeContext(Transport.Connection connection, final long contextId, OriginalIndices originalIndices) {
@@ -105,7 +110,7 @@ public class SearchTransportService extends AbstractComponent {
 
     public void sendCanMatch(Transport.Connection connection, final ShardSearchTransportRequest request, SearchTask task, final
         ActionListener<CanMatchResponse> listener) {
-        if (connection.getNode().getVersion().onOrAfter(Version.CURRENT.minimumCompatibilityVersion())) {
+        if (connection.getNode().getVersion().onOrAfter(Version.V_5_6_0)) {
             transportService.sendChildRequest(connection, QUERY_CAN_MATCH_NAME, request, task,
                 TransportRequestOptions.EMPTY, new ActionListenerResponseHandler<>(listener, CanMatchResponse::new));
         } else {
@@ -114,8 +119,7 @@ public class SearchTransportService extends AbstractComponent {
             // instead of sending the request we shortcut it here and let the caller deal with this -- see #25704
             // also failing the request instead of returning a fake answer might trigger a retry on a replica which might be on a
             // compatible node
-            throw new IllegalArgumentException("can_match is not supported on pre "+ Version.CURRENT.minimumCompatibilityVersion() +
-                " nodes");
+            throw new IllegalArgumentException("can_match is not supported on pre 5.6 nodes");
         }
     }
 
@@ -136,8 +140,10 @@ public class SearchTransportService extends AbstractComponent {
         // this used to be the QUERY_AND_FETCH which doesn't exist anymore.
         final boolean fetchDocuments = request.numberOfShards() == 1;
         Supplier<SearchPhaseResult> supplier = fetchDocuments ? QueryFetchSearchResult::new : QuerySearchResult::new;
-
+        final ActionListener handler = responseWrapper.apply(connection, listener);
         transportService.sendChildRequest(connection, QUERY_ACTION_NAME, request, task,
-            new ActionListenerResponseHandler<>(listener, supplier));
+            new ActionListenerResponseHandler<>(handler, supplier));
     }
 
     public void sendExecuteQuery(Transport.Connection connection, final QuerySearchRequest request, SearchTask task,
@@ -312,23 +318,57 @@ public class SearchTransportService extends AbstractComponent {
         TransportActionProxy.registerProxyAction(transportService, CLEAR_SCROLL_CONTEXTS_ACTION_NAME,
             () -> TransportResponse.Empty.INSTANCE);
 
-        transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
+        transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SAME,
             new TaskAwareTransportRequestHandler<ShardSearchTransportRequest>() {
                 @Override
                 public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel, Task task) throws Exception {
-                    DfsSearchResult result = searchService.executeDfsPhase(request, (SearchTask)task);
-                    channel.sendResponse(result);
+                    searchService.executeDfsPhase(request, (SearchTask) task, new ActionListener<SearchPhaseResult>() {
+                        @Override
+                        public void onResponse(SearchPhaseResult searchPhaseResult) {
+                            try {
+                                channel.sendResponse(searchPhaseResult);
+                            } catch (IOException e) {
+                                throw new UncheckedIOException(e);
+                            }
+                        }
+
+                        @Override
+                        public void onFailure(Exception e) {
+                            try {
+                                channel.sendResponse(e);
+                            } catch (IOException e1) {
+                                throw new UncheckedIOException(e1);
+                            }
+                        }
+                    });
+
                 }
             });
         TransportActionProxy.registerProxyAction(transportService, DFS_ACTION_NAME, DfsSearchResult::new);
 
-        transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
+        transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SAME,
             new TaskAwareTransportRequestHandler<ShardSearchTransportRequest>() {
                 @Override
                 public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel, Task task) throws Exception {
-                    SearchPhaseResult result = searchService.executeQueryPhase(request, (SearchTask)task);
-                    channel.sendResponse(result);
+                    searchService.executeQueryPhase(request, (SearchTask) task, new ActionListener<SearchPhaseResult>() {
+                        @Override
+                        public void onResponse(SearchPhaseResult searchPhaseResult) {
+                            try {
+                                channel.sendResponse(searchPhaseResult);
+                            } catch (IOException e) {
+                                throw new UncheckedIOException(e);
+                            }
+                        }
+
+                        @Override
+                        public void onFailure(Exception e) {
+                            try {
+                                channel.sendResponse(e);
+                            } catch (IOException e1) {
+                                throw new UncheckedIOException(e1);
+                            }
+                        }
+                    });
                 }
             });
         TransportActionProxy.registerProxyAction(transportService, QUERY_ACTION_NAME, QuerySearchResult::new);
@@ -384,8 +424,8 @@ public class SearchTransportService extends AbstractComponent {
         TransportActionProxy.registerProxyAction(transportService, FETCH_ID_ACTION_NAME, FetchSearchResult::new);
 
         // this is super cheap and should not hit thread-pool rejections
-        transportService.registerRequestHandler(QUERY_CAN_MATCH_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
-            false, true, new TaskAwareTransportRequestHandler<ShardSearchTransportRequest>() {
+        transportService.registerRequestHandler(QUERY_CAN_MATCH_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SAME,
+            new TaskAwareTransportRequestHandler<ShardSearchTransportRequest>() {
                 @Override
                 public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel, Task task) throws Exception {
                     boolean canMatch = searchService.canMatch(request);
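The DFS, query, and can-match handlers move to `ThreadPool.Names.SAME` and complete the transport channel from an `ActionListener` instead of computing synchronously on a search thread. A sketch of the listener-to-channel bridge that both rewritten handlers inline, using invented interfaces standing in for the transport types:

```java
import java.io.IOException;
import java.io.UncheckedIOException;

// Bridge sketch: an async computation completes a transport channel instead of
// the handler thread blocking on the result.
final class ChannelBridge {
    interface Channel {
        void sendResponse(Object response) throws IOException;
        void sendFailure(Exception e) throws IOException;
    }

    interface Listener<T> {
        void onResponse(T result);
        void onFailure(Exception e);
    }

    static <T> Listener<T> bridge(Channel channel) {
        return new Listener<T>() {
            @Override
            public void onResponse(T result) {
                try {
                    channel.sendResponse(result);
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            }

            @Override
            public void onFailure(Exception e) {
                try {
                    channel.sendFailure(e);
                } catch (IOException inner) {
                    throw new UncheckedIOException(inner);
                }
            }
        };
    }
}
```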
@@ -39,6 +39,7 @@ import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;
+import org.elasticsearch.index.query.Rewriteable;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.search.SearchService;
 import org.elasticsearch.search.builder.SearchSourceBuilder;
@@ -178,28 +179,39 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
         final long relativeStartNanos = System.nanoTime();
         final SearchTimeProvider timeProvider =
             new SearchTimeProvider(absoluteStartMillis, relativeStartNanos, System::nanoTime);
 
-        final ClusterState clusterState = clusterService.state();
-        final Map<String, OriginalIndices> remoteClusterIndices = remoteClusterService.groupIndices(searchRequest.indicesOptions(),
-            searchRequest.indices(), idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState));
-        OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
-        if (remoteClusterIndices.isEmpty()) {
-            executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, remoteClusterIndices, Collections.emptyList(),
-                (clusterName, nodeId) -> null, clusterState, Collections.emptyMap(), listener, clusterState.getNodes()
-                    .getDataNodes().size());
+        ActionListener<SearchSourceBuilder> rewriteListener = ActionListener.wrap(source -> {
+            if (source != searchRequest.source()) {
+                // only set it if it changed - we don't allow null values to be set but it might be already null be we want to catch
+                // situations when it possible due to a bug changes to null
+                searchRequest.source(source);
+            }
+            final ClusterState clusterState = clusterService.state();
+            final Map<String, OriginalIndices> remoteClusterIndices = remoteClusterService.groupIndices(searchRequest.indicesOptions(),
+                searchRequest.indices(), idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState));
+            OriginalIndices localIndices = remoteClusterIndices.remove(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
+            if (remoteClusterIndices.isEmpty()) {
+                executeSearch((SearchTask)task, timeProvider, searchRequest, localIndices, remoteClusterIndices, Collections.emptyList(),
+                    (clusterName, nodeId) -> null, clusterState, Collections.emptyMap(), listener, clusterState.getNodes()
+                        .getDataNodes().size());
+            } else {
+                remoteClusterService.collectSearchShards(searchRequest.indicesOptions(), searchRequest.preference(),
+                    searchRequest.routing(), remoteClusterIndices, ActionListener.wrap((searchShardsResponses) -> {
+                        List<SearchShardIterator> remoteShardIterators = new ArrayList<>();
+                        Map<String, AliasFilter> remoteAliasFilters = new HashMap<>();
+                        BiFunction<String, String, DiscoveryNode> clusterNodeLookup = processRemoteShards(searchShardsResponses,
+                            remoteClusterIndices, remoteShardIterators, remoteAliasFilters);
+                        int numNodesInvovled = searchShardsResponses.values().stream().mapToInt(r -> r.getNodes().length).sum()
+                            + clusterState.getNodes().getDataNodes().size();
+                        executeSearch((SearchTask) task, timeProvider, searchRequest, localIndices, remoteClusterIndices,
+                            remoteShardIterators, clusterNodeLookup, clusterState, remoteAliasFilters, listener, numNodesInvovled);
+                    }, listener::onFailure));
+            }
+        }, listener::onFailure);
+        if (searchRequest.source() == null) {
+            rewriteListener.onResponse(searchRequest.source());
         } else {
-            remoteClusterService.collectSearchShards(searchRequest.indicesOptions(), searchRequest.preference(), searchRequest.routing(),
-                remoteClusterIndices, ActionListener.wrap((searchShardsResponses) -> {
-                    List<SearchShardIterator> remoteShardIterators = new ArrayList<>();
-                    Map<String, AliasFilter> remoteAliasFilters = new HashMap<>();
-                    BiFunction<String, String, DiscoveryNode> clusterNodeLookup = processRemoteShards(searchShardsResponses,
-                        remoteClusterIndices, remoteShardIterators, remoteAliasFilters);
-                    int numNodesInvovled = searchShardsResponses.values().stream().mapToInt(r -> r.getNodes().length).sum()
-                        + clusterState.getNodes().getDataNodes().size();
-                    executeSearch((SearchTask) task, timeProvider, searchRequest, localIndices, remoteClusterIndices, remoteShardIterators,
-                        clusterNodeLookup, clusterState, remoteAliasFilters, listener, numNodesInvovled);
-                }, listener::onFailure));
+            Rewriteable.rewriteAndFetch(searchRequest.source(), searchService.getRewriteContext(timeProvider::getAbsoluteStartMillis),
+                rewriteListener);
         }
     }
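The whole former body of the method becomes the continuation of an asynchronous source rewrite: `ActionListener.wrap` packages it, a null source short-circuits, and `Rewriteable.rewriteAndFetch` drives it otherwise. A compact sketch of that rewrite-then-execute chaining with plain JDK types:

```java
import java.util.function.Consumer;

// Continuation sketch: run an async pre-step, then the original body,
// short-circuiting when there is nothing to pre-process.
final class RewriteThenRun {
    static void rewriteAsync(String source, Consumer<String> next) {
        next.accept(source.trim()); // stand-in for Rewriteable.rewriteAndFetch
    }

    static void execute(String source) {
        System.out.println("searching with source=" + source);
    }

    static void run(String source) {
        Consumer<String> continuation = RewriteThenRun::execute;
        if (source == null) {
            continuation.accept(null); // nothing to rewrite, go straight through
        } else {
            rewriteAsync(source, continuation);
        }
    }

    public static void main(String[] args) {
        run("  {\"query\":{}} ");
        run(null);
    }
}
```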
@@ -27,7 +27,6 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.UnavailableShardsException;
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.action.support.TransportActions;
 import org.elasticsearch.cluster.routing.AllocationId;
 import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.Nullable;
@@ -42,13 +41,10 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
 import java.util.Locale;
 import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Consumer;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 
 public class ReplicationOperation<
             Request extends ReplicationRequest<Request>,
@@ -108,7 +104,6 @@ public class ReplicationOperation<
             primary.updateLocalCheckpointForShard(primaryRouting.allocationId().getId(), primary.localCheckpoint());
             final ReplicaRequest replicaRequest = primaryResult.replicaRequest();
             if (replicaRequest != null) {
-                assert replicaRequest.primaryTerm() > 0 : "replicaRequest doesn't have a primary term";
                 if (logger.isTraceEnabled()) {
                     logger.trace("[{}] op [{}] completed on primary for request [{}]", primaryId, opType, request);
                 }
@@ -136,7 +131,7 @@ public class ReplicationOperation<
         for (String allocationId : Sets.difference(inSyncAllocationIds, indexShardRoutingTable.getAllAllocationIds())) {
             // mark copy as stale
             pendingActions.incrementAndGet();
-            replicasProxy.markShardCopyAsStaleIfNeeded(replicaRequest.shardId(), allocationId, replicaRequest.primaryTerm(),
+            replicasProxy.markShardCopyAsStaleIfNeeded(replicaRequest.shardId(), allocationId,
                 ReplicationOperation.this::decPendingAndFinishIfNeeded,
                 ReplicationOperation.this::onPrimaryDemoted,
                 throwable -> decPendingAndFinishIfNeeded()
@@ -205,7 +200,7 @@ public class ReplicationOperation<
             shardReplicaFailures.add(new ReplicationResponse.ShardInfo.Failure(
                 shard.shardId(), shard.currentNodeId(), replicaException, restStatus, false));
             String message = String.format(Locale.ROOT, "failed to perform %s on replica %s", opType, shard);
-            replicasProxy.failShardIfNeeded(shard, replicaRequest.primaryTerm(), message,
+            replicasProxy.failShardIfNeeded(shard, message,
                 replicaException, ReplicationOperation.this::decPendingAndFinishIfNeeded,
                 ReplicationOperation.this::onPrimaryDemoted, throwable -> decPendingAndFinishIfNeeded());
         }
@@ -363,7 +358,6 @@ public class ReplicationOperation<
          * implementation.
          *
          * @param replica          shard to fail
-         * @param primaryTerm      the primary term of the primary shard when requesting the failure
          * @param message          a (short) description of the reason
         * @param exception        the original exception which caused the ReplicationOperation to request the shard to be failed
         * @param onSuccess        a callback to call when the shard has been successfully removed from the active set.
@@ -371,7 +365,7 @@ public class ReplicationOperation<
         *                         by the master.
         * @param onIgnoredFailure a callback to call when failing a shard has failed, but it that failure can be safely ignored and the
         */
-        void failShardIfNeeded(ShardRouting replica, long primaryTerm, String message, Exception exception, Runnable onSuccess,
+        void failShardIfNeeded(ShardRouting replica, String message, Exception exception, Runnable onSuccess,
                                Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure);
 
         /**
@@ -381,13 +375,12 @@ public class ReplicationOperation<
          *
          * @param shardId          shard id
         * @param allocationId     allocation id to remove from the set of in-sync allocation ids
-         * @param primaryTerm      the primary term of the primary shard when requesting the failure
         * @param onSuccess        a callback to call when the allocation id has been successfully removed from the in-sync set.
         * @param onPrimaryDemoted a callback to call when the request failed because the current primary was already demoted
         *                         by the master.
         * @param onIgnoredFailure a callback to call when the request failed, but the failure can be safely ignored.
         */
-        void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, long primaryTerm, Runnable onSuccess,
+        void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, Runnable onSuccess,
                                           Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure);
     }
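With the primary term captured once by the proxy (see the `ReplicasProxy(primaryTerm)` change later in this diff), both callbacks lose a parameter. A sketch of the underlying pattern, state threaded through every call versus state captured at construction, with invented names:

```java
// Sketch: state that used to be threaded through every call is captured once
// at construction, shrinking the interface the caller programs against.
interface StaleCopyHandler {
    void markStale(String shardId, String allocationId);
}

final class TermScopedHandler implements StaleCopyHandler {
    private final long primaryTerm; // fixed for the lifetime of one operation

    TermScopedHandler(long primaryTerm) {
        this.primaryTerm = primaryTerm;
    }

    @Override
    public void markStale(String shardId, String allocationId) {
        System.out.println("mark [" + allocationId + "] of [" + shardId + "] stale under term [" + primaryTerm + "]");
    }
}
```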
@@ -55,8 +55,6 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
      */
     protected ShardId shardId;
 
-    long primaryTerm;
-
     protected TimeValue timeout = DEFAULT_TIMEOUT;
     protected String index;
 
@@ -170,16 +168,6 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
         return routedBasedOnClusterVersion;
     }
 
-    /** returns the primary term active at the time the operation was performed on the primary shard */
-    public long primaryTerm() {
-        return primaryTerm;
-    }
-
-    /** marks the primary term in which the operation was performed */
-    public void primaryTerm(long term) {
-        primaryTerm = term;
-    }
-
     @Override
     public ActionRequestValidationException validate() {
         ActionRequestValidationException validationException = null;
@@ -201,7 +189,6 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
         timeout = new TimeValue(in);
         index = in.readString();
         routedBasedOnClusterVersion = in.readVLong();
-        primaryTerm = in.readVLong();
     }
 
     @Override
@@ -217,7 +204,6 @@ public abstract class ReplicationRequest<Request extends ReplicationRequest<Requ
         timeout.writeTo(out);
         out.writeString(index);
         out.writeVLong(routedBasedOnClusterVersion);
-        out.writeVLong(primaryTerm);
     }
 
     @Override
@@ -107,7 +107,6 @@ public abstract class TransportReplicationAction<
     // package private for testing
     protected final String transportReplicaAction;
     protected final String transportPrimaryAction;
-    protected final ReplicationOperation.Replicas replicasProxy;
 
     protected TransportReplicationAction(Settings settings, String actionName, TransportService transportService,
                                          ClusterService clusterService, IndicesService indicesService,
@@ -127,8 +126,6 @@ public abstract class TransportReplicationAction<
         registerRequestHandlers(actionName, transportService, request, replicaRequest, executor);
 
         this.transportOptions = transportOptions();
-
-        this.replicasProxy = newReplicasProxy();
     }
 
     protected void registerRequestHandlers(String actionName, TransportService transportService, Supplier<Request> request,
@@ -153,8 +150,8 @@ public abstract class TransportReplicationAction<
         new ReroutePhase((ReplicationTask) task, request, listener).run();
     }
 
-    protected ReplicationOperation.Replicas newReplicasProxy() {
-        return new ReplicasProxy();
+    protected ReplicationOperation.Replicas newReplicasProxy(long primaryTerm) {
+        return new ReplicasProxy(primaryTerm);
     }
 
     protected abstract Response newResponseInstance();
@@ -275,28 +272,32 @@ public abstract class TransportReplicationAction<
 
         @Override
        public void messageReceived(ConcreteShardRequest<Request> request, TransportChannel channel, Task task) {
-            new AsyncPrimaryAction(request.request, request.targetAllocationID, channel, (ReplicationTask) task).run();
+            new AsyncPrimaryAction(request.request, request.targetAllocationID, request.primaryTerm, channel, (ReplicationTask) task).run();
         }
     }
 
     class AsyncPrimaryAction extends AbstractRunnable implements ActionListener<PrimaryShardReference> {
 
         private final Request request;
-        /** targetAllocationID of the shard this request is meant for */
+        // targetAllocationID of the shard this request is meant for
         private final String targetAllocationID;
+        // primary term of the shard this request is meant for
+        private final long primaryTerm;
         private final TransportChannel channel;
         private final ReplicationTask replicationTask;
 
-        AsyncPrimaryAction(Request request, String targetAllocationID, TransportChannel channel, ReplicationTask replicationTask) {
+        AsyncPrimaryAction(Request request, String targetAllocationID, long primaryTerm, TransportChannel channel,
+                           ReplicationTask replicationTask) {
            this.request = request;
            this.targetAllocationID = targetAllocationID;
+            this.primaryTerm = primaryTerm;
            this.channel = channel;
            this.replicationTask = replicationTask;
        }
 
        @Override
        protected void doRun() throws Exception {
-            acquirePrimaryShardReference(request.shardId(), targetAllocationID, this);
+            acquirePrimaryShardReference(request.shardId(), targetAllocationID, primaryTerm, this);
        }
 
        @Override
@@ -312,7 +313,7 @@ public abstract class TransportReplicationAction<
                 assert primary.relocating() : "indexShard is marked as relocated but routing isn't" + primary;
                 DiscoveryNode relocatingNode = clusterService.state().nodes().get(primary.relocatingNodeId());
                 transportService.sendRequest(relocatingNode, transportPrimaryAction,
-                    new ConcreteShardRequest<>(request, primary.allocationId().getRelocationId()),
+                    new ConcreteShardRequest<>(request, primary.allocationId().getRelocationId(), primaryTerm),
                     transportOptions,
                     new TransportChannelResponseHandler<Response>(logger, channel, "rerouting indexing to target primary " + primary,
                         TransportReplicationAction.this::newResponseInstance) {
@@ -384,7 +385,7 @@ public abstract class TransportReplicationAction<
                 Request request, ActionListener<PrimaryResult<ReplicaRequest, Response>> listener,
                 PrimaryShardReference primaryShardReference) {
             return new ReplicationOperation<>(request, primaryShardReference, listener,
-                replicasProxy, logger, actionName);
+                newReplicasProxy(primaryTerm), logger, actionName);
         }
     }
 
@@ -470,6 +471,7 @@ public abstract class TransportReplicationAction<
             new AsyncReplicaAction(
                 replicaRequest.getRequest(),
                 replicaRequest.getTargetAllocationID(),
+                replicaRequest.getPrimaryTerm(),
                 replicaRequest.getGlobalCheckpoint(),
                 channel,
                 (ReplicationTask) task).run();
@@ -493,6 +495,7 @@ public abstract class TransportReplicationAction<
         private final ReplicaRequest request;
         // allocation id of the replica this request is meant for
         private final String targetAllocationID;
+        private final long primaryTerm;
         private final long globalCheckpoint;
         private final TransportChannel channel;
         private final IndexShard replica;
@@ -507,6 +510,7 @@ public abstract class TransportReplicationAction<
         AsyncReplicaAction(
                 ReplicaRequest request,
                 String targetAllocationID,
+                long primaryTerm,
                 long globalCheckpoint,
                 TransportChannel channel,
                 ReplicationTask task) {
@@ -514,6 +518,7 @@ public abstract class TransportReplicationAction<
             this.channel = channel;
             this.task = task;
             this.targetAllocationID = targetAllocationID;
+            this.primaryTerm = primaryTerm;
             this.globalCheckpoint = globalCheckpoint;
             final ShardId shardId = request.shardId();
             assert shardId != null : "request shardId must be set";
@@ -554,7 +559,7 @@ public abstract class TransportReplicationAction<
                     new TransportChannelResponseHandler<>(logger, channel, extraMessage,
                         () -> TransportResponse.Empty.INSTANCE);
                 transportService.sendRequest(clusterService.localNode(), transportReplicaAction,
-                    new ConcreteReplicaRequest<>(request, targetAllocationID, globalCheckpoint),
+                    new ConcreteReplicaRequest<>(request, targetAllocationID, primaryTerm, globalCheckpoint),
                     handler);
             }
 
@@ -596,7 +601,7 @@ public abstract class TransportReplicationAction<
                 throw new ShardNotFoundException(this.replica.shardId(), "expected aID [{}] but found [{}]", targetAllocationID,
                     actualAllocationId);
             }
-            replica.acquireReplicaOperationPermit(request.primaryTerm, globalCheckpoint, this, executor);
+            replica.acquireReplicaOperationPermit(primaryTerm, globalCheckpoint, this, executor);
         }
 
         /**
@@ -694,19 +699,20 @@ public abstract class TransportReplicationAction<
             }
             final DiscoveryNode node = state.nodes().get(primary.currentNodeId());
             if (primary.currentNodeId().equals(state.nodes().getLocalNodeId())) {
-                performLocalAction(state, primary, node);
+                performLocalAction(state, primary, node, indexMetaData);
             } else {
                 performRemoteAction(state, primary, node);
             }
         }
 
-        private void performLocalAction(ClusterState state, ShardRouting primary, DiscoveryNode node) {
+        private void performLocalAction(ClusterState state, ShardRouting primary, DiscoveryNode node, IndexMetaData indexMetaData) {
             setPhase(task, "waiting_on_primary");
             if (logger.isTraceEnabled()) {
                 logger.trace("send action [{}] to local primary [{}] for request [{}] with cluster state version [{}] to [{}] ",
                     transportPrimaryAction, request.shardId(), request, state.version(), primary.currentNodeId());
             }
-            performAction(node, transportPrimaryAction, true, new ConcreteShardRequest<>(request, primary.allocationId().getId()));
+            performAction(node, transportPrimaryAction, true,
+                new ConcreteShardRequest<>(request, primary.allocationId().getId(), indexMetaData.primaryTerm(primary.id())));
         }
 
         private void performRemoteAction(ClusterState state, ShardRouting primary, DiscoveryNode node) {
@@ -906,7 +912,7 @@ public abstract class TransportReplicationAction<
      * Tries to acquire reference to {@link IndexShard} to perform a primary operation. Released after performing primary operation locally
      * and replication of the operation to all replica shards is completed / failed (see {@link ReplicationOperation}).
      */
-    private void acquirePrimaryShardReference(ShardId shardId, String allocationId,
+    private void acquirePrimaryShardReference(ShardId shardId, String allocationId, long primaryTerm,
                                               ActionListener<PrimaryShardReference> onReferenceAcquired) {
         IndexShard indexShard = getIndexShard(shardId);
         // we may end up here if the cluster state used to route the primary is so stale that the underlying
@@ -920,6 +926,11 @@ public abstract class TransportReplicationAction<
         if (actualAllocationId.equals(allocationId) == false) {
             throw new ShardNotFoundException(shardId, "expected aID [{}] but found [{}]", allocationId, actualAllocationId);
         }
+        final long actualTerm = indexShard.getPrimaryTerm();
+        if (actualTerm != primaryTerm) {
+            throw new ShardNotFoundException(shardId, "expected aID [{}] with term [{}] but found [{}]", allocationId,
+                primaryTerm, actualTerm);
+        }
 
         ActionListener<Releasable> onAcquired = new ActionListener<Releasable>() {
             @Override
@@ -984,11 +995,8 @@ public abstract class TransportReplicationAction<
         @Override
         public PrimaryResult perform(Request request) throws Exception {
             PrimaryResult result = shardOperationOnPrimary(request, indexShard);
-            if (result.replicaRequest() != null) {
-                assert result.finalFailure == null : "a replica request [" + result.replicaRequest()
-                    + "] with a primary failure [" + result.finalFailure + "]";
-                result.replicaRequest().primaryTerm(indexShard.getPrimaryTerm());
-            }
+            assert result.replicaRequest() == null || result.finalFailure == null : "a replica request [" + result.replicaRequest()
+                + "] with a primary failure [" + result.finalFailure + "]";
             return result;
         }
 
@@ -1011,7 +1019,6 @@ public abstract class TransportReplicationAction<
         public ReplicationGroup getReplicationGroup() {
             return indexShard.getReplicationGroup();
         }
-
     }
 
 
@@ -1023,6 +1030,12 @@ public abstract class TransportReplicationAction<
         }
 
         public ReplicaResponse(long localCheckpoint) {
+            /*
+             * A replica should always know its own local checkpoint so this should always be a valid sequence number or the pre-6.0 local
+             * checkpoint value when simulating responses to replication actions that pre-6.0 nodes are not aware of (e.g., the global
+             * checkpoint background sync, and the primary/replica resync).
+             */
+            assert localCheckpoint != SequenceNumbersService.UNASSIGNED_SEQ_NO;
             this.localCheckpoint = localCheckpoint;
         }
 
@@ -1062,8 +1075,10 @@ public abstract class TransportReplicationAction<
      */
     protected class ReplicasProxy implements ReplicationOperation.Replicas<ReplicaRequest> {
 
-        public ReplicasProxy() {
+        protected final long primaryTerm;
+
+        public ReplicasProxy(long primaryTerm) {
+            this.primaryTerm = primaryTerm;
         }
 
         @Override
@@ -1079,12 +1094,12 @@ public abstract class TransportReplicationAction<
                 return;
             }
             final ConcreteReplicaRequest<ReplicaRequest> replicaRequest =
-                new ConcreteReplicaRequest<>(request, replica.allocationId().getId(), globalCheckpoint);
+                new ConcreteReplicaRequest<>(request, replica.allocationId().getId(), primaryTerm, globalCheckpoint);
             sendReplicaRequest(replicaRequest, node, listener);
         }
 
         @Override
-        public void failShardIfNeeded(ShardRouting replica, long primaryTerm, String message, Exception exception,
+        public void failShardIfNeeded(ShardRouting replica, String message, Exception exception,
                                       Runnable onSuccess, Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
             // This does not need to fail the shard. The idea is that this
             // is a non-write operation (something like a refresh or a global
@@ -1094,7 +1109,7 @@ public abstract class TransportReplicationAction<
         }
 
         @Override
-        public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, long primaryTerm, Runnable onSuccess,
+        public void markShardCopyAsStaleIfNeeded(ShardId shardId, String allocationId, Runnable onSuccess,
                                                  Consumer<Exception> onPrimaryDemoted, Consumer<Exception> onIgnoredFailure) {
             // This does not need to make the shard stale. The idea is that this
             // is a non-write operation (something like a refresh or a global
@@ -1125,19 +1140,23 @@ public abstract class TransportReplicationAction<
         /** {@link AllocationId#getId()} of the shard this request is sent to **/
         private String targetAllocationID;
 
+        private long primaryTerm;
+
         private R request;
 
         public ConcreteShardRequest(Supplier<R> requestSupplier) {
             request = requestSupplier.get();
             // null now, but will be populated by reading from the streams
             targetAllocationID = null;
+            primaryTerm = 0L;
         }
 
-        public ConcreteShardRequest(R request, String targetAllocationID) {
+        public ConcreteShardRequest(R request, String targetAllocationID, long primaryTerm) {
             Objects.requireNonNull(request);
             Objects.requireNonNull(targetAllocationID);
             this.request = request;
             this.targetAllocationID = targetAllocationID;
+            this.primaryTerm = primaryTerm;
         }
 
         @Override
@@ -1161,18 +1180,20 @@ public abstract class TransportReplicationAction<
 
         @Override
         public String getDescription() {
-            return "[" + request.getDescription() + "] for aID [" + targetAllocationID + "]";
+            return "[" + request.getDescription() + "] for aID [" + targetAllocationID + "] and term [" + primaryTerm + "]";
         }
 
         @Override
         public void readFrom(StreamInput in) throws IOException {
             targetAllocationID = in.readString();
+            primaryTerm = in.readVLong();
             request.readFrom(in);
         }
 
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeString(targetAllocationID);
+            out.writeVLong(primaryTerm);
             request.writeTo(out);
         }
 
@@ -1184,9 +1205,13 @@ public abstract class TransportReplicationAction<
             return targetAllocationID;
         }
 
+        public long getPrimaryTerm() {
+            return primaryTerm;
+        }
+
         @Override
         public String toString() {
-            return "request: " + request + ", target allocation id: " + targetAllocationID;
+            return "request: " + request + ", target allocation id: " + targetAllocationID + ", primary term: " + primaryTerm;
        }
    }
 
@@ -1198,8 +1223,9 @@ public abstract class TransportReplicationAction<
             super(requestSupplier);
         }
 
-        public ConcreteReplicaRequest(final R request, final String targetAllocationID, final long globalCheckpoint) {
-            super(request, targetAllocationID);
+        public ConcreteReplicaRequest(final R request, final String targetAllocationID, final long primaryTerm,
+                                      final long globalCheckpoint) {
+            super(request, targetAllocationID, primaryTerm);
             this.globalCheckpoint = globalCheckpoint;
         }
 
@@ -1229,6 +1255,7 @@ public abstract class TransportReplicationAction<
         public String toString() {
             return "ConcreteReplicaRequest{" +
                 "targetAllocationID='" + getTargetAllocationID() + '\'' +
+                ", primaryTerm='" + getPrimaryTerm() + '\'' +
                 ", request=" + getRequest() +
                 ", globalCheckpoint=" + globalCheckpoint +
                 '}';
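`ConcreteShardRequest` now serializes the allocation id, then the primary term, then the wrapped request; `readFrom` mirrors that order exactly, which is what keeps the framing correct. A generic illustration with JDK streams:

```java
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// The read order must mirror the write order field-for-field, or every later
// field is parsed from the wrong bytes.
final class Envelope {
    String targetAllocationId;
    long primaryTerm;
    byte[] payload;

    void writeTo(DataOutputStream out) throws IOException {
        out.writeUTF(targetAllocationId);
        out.writeLong(primaryTerm);
        out.writeInt(payload.length);
        out.write(payload);
    }

    void readFrom(DataInputStream in) throws IOException {
        targetAllocationId = in.readUTF(); // same order as writeTo
        primaryTerm = in.readLong();
        payload = new byte[in.readInt()];
        in.readFully(payload);
    }
}
```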
Some files were not shown because too many files have changed in this diff.