Reindex from remote
This adds a remote option to reindex that looks like:

```
curl -XPOST 'localhost:9200/_reindex?pretty' -d'{
  "source": {
    "remote": {
      "host": "http://otherhost:9200"
    },
    "index": "target",
    "query": {
      "match": {
        "foo": "bar"
      }
    }
  },
  "dest": {
    "index": "target"
  }
}'
```

This reindex has all of the features of local reindex:

* Using queries to filter what is copied
* Retry on rejection
* Throttle/rethrottle

The big advantage of this version is that it goes over the HTTP API, which can be made backwards compatible.

Some things are different: the `query` field is sent directly to the other node rather than parsed on the coordinating node. This should allow it to support constructs that are invalid on the coordinating node but are valid on the target node. Mostly, that means old syntax.
parent 96f283c195
commit b3c015e2bb
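The throttle/rethrottle support mentioned above is driven by the `requests_per_second` parameter. A hedged sketch of the calls, recalled from this era's API rather than taken from this diff (the task id and request body are placeholders):

```
# Start a throttled reindex.
curl -XPOST 'localhost:9200/_reindex?requests_per_second=500' -d'{...}'

# Change the throttle on a running reindex by task id.
curl -XPOST 'localhost:9200/_reindex/<task_id>/_rethrottle?requests_per_second=1000'
```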
@ -89,6 +89,13 @@ public abstract class BackoffPolicy implements Iterable<TimeValue> {
        return new ExponentialBackoff((int) checkDelay(initialDelay).millis(), maxNumberOfRetries);
    }

    /**
     * Wraps the backoff policy in one that calls a method every time a new backoff is taken from the policy.
     */
    public static BackoffPolicy wrap(BackoffPolicy delegate, Runnable onBackoff) {
        return new WrappedBackoffPolicy(delegate, onBackoff);
    }

    private static TimeValue checkDelay(TimeValue delay) {
        if (delay.millis() > Integer.MAX_VALUE) {
            throw new IllegalArgumentException("delay must be <= " + Integer.MAX_VALUE + " ms");
@ -200,4 +207,43 @@ public abstract class BackoffPolicy implements Iterable<TimeValue> {
            return delay;
        }
    }

    private static final class WrappedBackoffPolicy extends BackoffPolicy {
        private final BackoffPolicy delegate;
        private final Runnable onBackoff;

        public WrappedBackoffPolicy(BackoffPolicy delegate, Runnable onBackoff) {
            this.delegate = delegate;
            this.onBackoff = onBackoff;
        }

        @Override
        public Iterator<TimeValue> iterator() {
            return new WrappedBackoffIterator(delegate.iterator(), onBackoff);
        }
    }

    private static final class WrappedBackoffIterator implements Iterator<TimeValue> {
        private final Iterator<TimeValue> delegate;
        private final Runnable onBackoff;

        public WrappedBackoffIterator(Iterator<TimeValue> delegate, Runnable onBackoff) {
            this.delegate = delegate;
            this.onBackoff = onBackoff;
        }

        @Override
        public boolean hasNext() {
            return delegate.hasNext();
        }

        @Override
        public TimeValue next() {
            if (false == delegate.hasNext()) {
                throw new NoSuchElementException();
            }
            onBackoff.run();
            return delegate.next();
        }
    }
}
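The new `wrap` method lets a caller observe every backoff the moment it is taken. A minimal sketch of a consumer, assuming only the `BackoffPolicy` API shown in this diff plus the pre-existing `exponentialBackoff` factory; the surrounding class is invented for illustration:

```java
import java.util.concurrent.atomic.AtomicInteger;

import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.common.unit.TimeValue;

public class BackoffWrapExample {
    public static void main(String[] args) throws InterruptedException {
        AtomicInteger backoffsTaken = new AtomicInteger();
        // The callback runs each time next() hands out a delay, so it counts
        // exactly how many backoffs were actually taken.
        BackoffPolicy policy = BackoffPolicy.wrap(
                BackoffPolicy.exponentialBackoff(TimeValue.timeValueMillis(50), 8),
                backoffsTaken::incrementAndGet);
        for (TimeValue delay : policy) {
            Thread.sleep(delay.millis()); // stand-in for "wait, then retry"
        }
        System.out.println("took " + backoffsTaken.get() + " backoffs");
    }
}
```

This mirrors how retry code can report rejection/retry statistics without each backoff implementation having to know about the reporting.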
@ -413,6 +413,7 @@ public final class ObjectParser<Value, Context extends ParseFieldMatcherSupplier
        STRING(VALUE_STRING),
        STRING_OR_NULL(VALUE_STRING, VALUE_NULL),
        FLOAT(VALUE_NUMBER, VALUE_STRING),
        FLOAT_OR_NULL(VALUE_NUMBER, VALUE_STRING, VALUE_NULL),
        DOUBLE(VALUE_NUMBER, VALUE_STRING),
        LONG(VALUE_NUMBER, VALUE_STRING),
        INT(VALUE_NUMBER, VALUE_STRING),
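A sketch of what the new `*_OR_NULL` value types allow: a field declared with `STRING_OR_NULL` accepts an explicit JSON `null` in addition to a string. The `Settings` holder, the field name, and the exact `declareField` overload are assumptions for illustration, not taken from this diff:

```java
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;

public class StringOrNullExample {
    // Invented holder for the parsed value.
    static class Settings {
        String username;
    }

    static final ObjectParser<Settings, ParseFieldMatcherSupplier> PARSER =
            new ObjectParser<>("settings", Settings::new);
    static {
        // STRING_OR_NULL lists VALUE_NULL among its accepted tokens, so an
        // explicit `"username": null` passes the token check; textOrNull()
        // then maps the null token to a Java null. The overload shape here
        // (consumer, parser lambda, field, value type) is an assumption.
        PARSER.declareField((settings, username) -> settings.username = username,
                (parser, context) -> parser.textOrNull(),
                new ParseField("username"), ValueType.STRING_OR_NULL);
    }
}
```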
@ -0,0 +1,65 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.bulk;

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.test.ESTestCase;

import java.util.Iterator;
import java.util.NoSuchElementException;
import java.util.concurrent.atomic.AtomicInteger;

import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;

public class BackoffPolicyTests extends ESTestCase {
    public void testWrapBackoffPolicy() {
        TimeValue timeValue = timeValueMillis(between(0, Integer.MAX_VALUE));
        int maxNumberOfRetries = between(1, 1000);
        BackoffPolicy policy = BackoffPolicy.constantBackoff(timeValue, maxNumberOfRetries);
        AtomicInteger retries = new AtomicInteger();
        policy = BackoffPolicy.wrap(policy, retries::getAndIncrement);

        int expectedRetries = 0;
        {
            // Fetching the iterator doesn't call the callback
            Iterator<TimeValue> itr = policy.iterator();
            assertEquals(expectedRetries, retries.get());

            while (itr.hasNext()) {
                // hasNext doesn't trigger the callback
                assertEquals(expectedRetries, retries.get());
                // next does
                itr.next();
                expectedRetries += 1;
                assertEquals(expectedRetries, retries.get());
            }
            // next doesn't call the callback when there isn't a backoff available
            expectThrows(NoSuchElementException.class, () -> itr.next());
            assertEquals(expectedRetries, retries.get());
        }
        {
            // The second iterator also calls the callback
            Iterator<TimeValue> itr = policy.iterator();
            itr.next();
            expectedRetries += 1;
            assertEquals(expectedRetries, retries.get());
        }
    }
}
@ -30,6 +30,8 @@ integTest {
    configFile 'scripts/my_script.py'
    configFile 'userdict_ja.txt'
    configFile 'KeywordTokenizer.rbbi'
    // Whitelist reindexing from the local node so we can test it.
    setting 'reindex.remote.whitelist', 'myself'
  }
}
@ -81,3 +83,15 @@ Closure setupTwitter = { String name, int count ->
}
setupTwitter('twitter', 5)
setupTwitter('big_twitter', 120)

buildRestTests.setups['host'] = '''
# Fetch the http host. We use the host of the master because we know there will always be a master.
  - do:
      cluster.state: {}
  - set: { master_node: master }
  - do:
      nodes.info:
        metric: [ http ]
  - is_true: nodes.$master.http.publish_address
  - set: {nodes.$master.http.publish_address: host}
'''
@ -361,6 +361,60 @@ POST _reindex
// CONSOLE
// TEST[s/^/PUT source\nGET _cluster\/health?wait_for_status=yellow\n/]

[float]
=== Reindex from Remote

Reindex supports reindexing from a remote Elasticsearch cluster:

[source,js]
--------------------------------------------------
POST _reindex
{
  "source": {
    "remote": {
      "host": "http://otherhost:9200",
      "username": "user",
      "password": "pass"
    },
    "index": "source",
    "query": {
      "match": {
        "test": "data"
      }
    }
  },
  "dest": {
    "index": "dest"
  }
}
--------------------------------------------------
// CONSOLE
// TEST[setup:host]
// TEST[s/^/PUT source\nGET _cluster\/health?wait_for_status=yellow\n/]
// TEST[s/otherhost:9200",/\${host}"/]
// TEST[s/"username": "user",//]
// TEST[s/"password": "pass"//]

The `host` parameter must contain a scheme, host, and port (e.g.
`https://otherhost:9200`). The `username` and `password` parameters are
optional and, when they are present, reindex will connect to the remote
Elasticsearch node using basic auth. Be sure to use `https` when using
basic auth or the password will be sent in plain text.

Remote hosts have to be explicitly whitelisted in `elasticsearch.yml` using the
`reindex.remote.whitelist` property. It can be set to a comma-delimited list
of allowed remote `host` and `port` combinations (e.g.
`otherhost:9200, another:9200`). Scheme is ignored by the whitelist - only host
and port are used.
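For example, a hedged illustration (not part of this diff) of a whitelist entry in `elasticsearch.yml`, using the host/port values from the paragraph above:

[source,yaml]
--------------------------------------------------
reindex.remote.whitelist: "otherhost:9200, another:9200"
--------------------------------------------------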
This feature should work with remote clusters of any version of Elasticsearch
you are likely to find. This should allow you to upgrade from any version of
Elasticsearch to the current version by reindexing from a cluster of the old
version.

To enable queries sent to older versions of Elasticsearch, the `query` parameter
is sent directly to the remote host without validation or modification, as
sketched below.
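One hedged illustration (not part of this diff): the `filtered` query is old syntax that newer coordinating nodes reject, but because the `query` is forwarded verbatim it can still be sent to an old remote that understands it:

[source,js]
--------------------------------------------------
POST _reindex
{
  "source": {
    "remote": {
      "host": "http://otherhost:9200"
    },
    "index": "source",
    "query": {
      "filtered": {
        "query": { "match_all": {} },
        "filter": { "term": { "user": "kimchy" } }
      }
    }
  },
  "dest": {
    "index": "dest"
  }
}
--------------------------------------------------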
[float]
=== URL Parameters
@ -21,3 +21,41 @@ esplugin {
  description 'The Reindex module adds APIs to reindex from one index to another or update documents in place.'
  classname 'org.elasticsearch.index.reindex.ReindexPlugin'
}

integTest {
  cluster {
    // Whitelist reindexing from the local node so we can test it.
    setting 'reindex.remote.whitelist', 'myself'
  }
}

run {
  // Whitelist reindexing from the local node so we can test it.
  setting 'reindex.remote.whitelist', 'myself'
}

dependencies {
  compile "org.elasticsearch.client:rest:${version}"
  // dependencies of the rest client
  compile "org.apache.httpcomponents:httpclient:${versions.httpclient}"
  compile "org.apache.httpcomponents:httpcore:${versions.httpcore}"
  compile "commons-codec:commons-codec:${versions.commonscodec}"
  compile "commons-logging:commons-logging:${versions.commonslogging}"
}

dependencyLicenses {
  // Don't check the client's license. We know it.
  dependencies = project.configurations.runtime.fileCollection {
    it.group.startsWith('org.elasticsearch') == false
  } - project.configurations.provided
}

thirdPartyAudit.excludes = [
  // Commons logging
  'javax.servlet.ServletContextEvent',
  'javax.servlet.ServletContextListener',
  'org.apache.avalon.framework.logger.Logger',
  'org.apache.log.Hierarchy',
  'org.apache.log.Logger',
]
@ -0,0 +1 @@
4b95f4897fa13f2cd904aee711aeafc0c5295cd8
@ -0,0 +1,202 @@
[Full text of the Apache License, Version 2.0]
@ -0,0 +1,17 @@
Apache Commons Codec
Copyright 2002-2015 The Apache Software Foundation

This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java
contains test data from http://aspell.net/test/orig/batch0.tab.
Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org)

===============================================================================

The content of package org.apache.commons.codec.language.bm has been translated
from the original php source code available at http://stevemorse.org/phoneticinfo.htm
with permission from the original authors.
Original source copyright:
Copyright (c) 2008 Alexander Beider & Stephen P. Morse.
@ -0,0 +1 @@
f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f
@ -0,0 +1,202 @@
[Full text of the Apache License, Version 2.0]
@ -0,0 +1,5 @@
Apache Commons CLI
Copyright 2001-2009 The Apache Software Foundation

This product includes software developed by
The Apache Software Foundation (http://www.apache.org/).
@ -0,0 +1 @@
733db77aa8d9b2d68015189df76ab06304406e50
@ -0,0 +1,558 @@
[Full text of the Apache License, Version 2.0]

=========================================================================

This project includes Public Suffix List copied from
<https://publicsuffix.org/list/effective_tld_names.dat>
licensed under the terms of the Mozilla Public License, v. 2.0

Full license text: <http://mozilla.org/MPL/2.0/>

[Full text of the Mozilla Public License, Version 2.0]
* personal injury resulting from such party's negligence to the *
|
||||
* extent applicable law prohibits such limitation. Some *
|
||||
* jurisdictions do not allow the exclusion or limitation of *
|
||||
* incidental or consequential damages, so this exclusion and *
|
||||
* limitation may not apply to You. *
|
||||
* *
|
||||
************************************************************************
|
||||
|
||||
8. Litigation
|
||||
-------------
|
||||
|
||||
Any litigation relating to this License may be brought only in the
|
||||
courts of a jurisdiction where the defendant maintains its principal
|
||||
place of business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions.
|
||||
Nothing in this Section shall prevent a party's ability to bring
|
||||
cross-claims or counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
----------------
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides
|
||||
that the language of a contract shall be construed against the drafter
|
||||
shall not be used to construe this License against a Contributor.
|
||||
|
||||
10. Versions of the License
|
||||
---------------------------
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses
|
||||
|
||||
If You choose to distribute Source Code Form that is Incompatible With
|
||||
Secondary Licenses under the terms of this version of the License, the
|
||||
notice described in Exhibit B of this License must be attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
-------------------------------------------
|
||||
|
||||
This Source Code Form is subject to the terms of the Mozilla Public
|
||||
License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular
|
||||
file, then You may include the notice in a location (such as a LICENSE
|
||||
file in a relevant directory) where a recipient would be likely to look
|
||||
for such a notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
---------------------------------------------------------
|
||||
|
||||
This Source Code Form is "Incompatible With Secondary Licenses", as
|
||||
defined by the Mozilla Public License, v. 2.0.
|
|
@ -0,0 +1,5 @@
Apache HttpComponents Client
Copyright 1999-2015 The Apache Software Foundation

This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

@ -0,0 +1 @@
b31526a230871fbe285fbcbe2813f9c0839ae9b0

@ -0,0 +1,241 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

=========================================================================

This project contains annotations in the package org.apache.http.annotation
which are derived from JCIP-ANNOTATIONS
Copyright (c) 2005 Brian Goetz and Tim Peierls.
See http://www.jcip.net and the Creative Commons Attribution License
(http://creativecommons.org/licenses/by/2.5)
Full text: http://creativecommons.org/licenses/by/2.5/legalcode

License

THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.

BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.

1. Definitions

"Collective Work" means a work, such as a periodical issue, anthology or encyclopedia, in which the Work in its entirety in unmodified form, along with a number of other contributions, constituting separate and independent works in themselves, are assembled into a collective whole. A work that constitutes a Collective Work will not be considered a Derivative Work (as defined below) for the purposes of this License.
"Derivative Work" means a work based upon the Work or upon the Work and other pre-existing works, such as a translation, musical arrangement, dramatization, fictionalization, motion picture version, sound recording, art reproduction, abridgment, condensation, or any other form in which the Work may be recast, transformed, or adapted, except that a work that constitutes a Collective Work will not be considered a Derivative Work for the purpose of this License. For the avoidance of doubt, where the Work is a musical composition or sound recording, the synchronization of the Work in timed-relation with a moving image ("synching") will be considered a Derivative Work for the purpose of this License.
"Licensor" means the individual or entity that offers the Work under the terms of this License.
"Original Author" means the individual or entity who created the Work.
"Work" means the copyrightable work of authorship offered under the terms of this License.
"You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation.

2. Fair Use Rights. Nothing in this license is intended to reduce, limit, or restrict any rights arising from fair use, first sale or other limitations on the exclusive rights of the copyright owner under copyright law or other applicable laws.

3. License Grant. Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below:

to reproduce the Work, to incorporate the Work into one or more Collective Works, and to reproduce the Work as incorporated in the Collective Works;
to create and reproduce Derivative Works;
to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission the Work including as incorporated in Collective Works;
to distribute copies or phonorecords of, display publicly, perform publicly, and perform publicly by means of a digital audio transmission Derivative Works.

For the avoidance of doubt, where the work is a musical composition:
Performance Royalties Under Blanket Licenses. Licensor waives the exclusive right to collect, whether individually or via a performance rights society (e.g. ASCAP, BMI, SESAC), royalties for the public performance or public digital performance (e.g. webcast) of the Work.
Mechanical Rights and Statutory Royalties. Licensor waives the exclusive right to collect, whether individually or via a music rights agency or designated agent (e.g. Harry Fox Agency), royalties for any phonorecord You create from the Work ("cover version") and distribute, subject to the compulsory license created by 17 USC Section 115 of the US Copyright Act (or the equivalent in other jurisdictions).
Webcasting Rights and Statutory Royalties. For the avoidance of doubt, where the Work is a sound recording, Licensor waives the exclusive right to collect, whether individually or via a performance-rights society (e.g. SoundExchange), royalties for the public digital performance (e.g. webcast) of the Work, subject to the compulsory license created by 17 USC Section 114 of the US Copyright Act (or the equivalent in other jurisdictions).

The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. All rights not expressly granted by Licensor are hereby reserved.

4. Restrictions.The license granted in Section 3 above is expressly made subject to and limited by the following restrictions:

You may distribute, publicly display, publicly perform, or publicly digitally perform the Work only under the terms of this License, and You must include a copy of, or the Uniform Resource Identifier for, this License with every copy or phonorecord of the Work You distribute, publicly display, publicly perform, or publicly digitally perform. You may not offer or impose any terms on the Work that alter or restrict the terms of this License or the recipients' exercise of the rights granted hereunder. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties. You may not distribute, publicly display, publicly perform, or publicly digitally perform the Work with any technological measures that control access or use of the Work in a manner inconsistent with the terms of this License Agreement. The above applies to the Work as incorporated in a Collective Work, but this does not require the Collective Work apart from the Work itself to be made subject to the terms of this License. If You create a Collective Work, upon notice from any Licensor You must, to the extent practicable, remove from the Collective Work any credit as required by clause 4(b), as requested. If You create a Derivative Work, upon notice from any Licensor You must, to the extent practicable, remove from the Derivative Work any credit as required by clause 4(b), as requested.
If you distribute, publicly display, publicly perform, or publicly digitally perform the Work or any Derivative Works or Collective Works, You must keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of the Original Author (or pseudonym, if applicable) if supplied, and/or (ii) if the Original Author and/or Licensor designate another party or parties (e.g. a sponsor institute, publishing entity, journal) for attribution in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; the title of the Work if supplied; to the extent reasonably practicable, the Uniform Resource Identifier, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and in the case of a Derivative Work, a credit identifying the use of the Work in the Derivative Work (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). Such credit may be implemented in any reasonable manner; provided, however, that in the case of a Derivative Work or Collective Work, at a minimum such credit will appear where any other comparable authorship credit appears and in a manner at least as prominent as such other comparable authorship credit.

5. Representations, Warranties and Disclaimer

UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.

6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

7. Termination

This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Derivative Works or Collective Works from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License.
Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above.

8. Miscellaneous

Each time You distribute or publicly digitally perform the Work or a Collective Work, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License.
Each time You distribute or publicly digitally perform a Derivative Work, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License.
If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable.
No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent.
This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual written agreement of the Licensor and You.
@ -0,0 +1,8 @@
Apache HttpComponents Core
Copyright 2005-2014 The Apache Software Foundation

This product includes software developed at
The Apache Software Foundation (http://www.apache.org/).

This project contains annotations derived from JCIP-ANNOTATIONS
Copyright (c) 2005 Brian Goetz and Tim Peierls. See http://www.jcip.net
@ -19,7 +19,6 @@

package org.elasticsearch.index.reindex;

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest;
import org.elasticsearch.action.admin.indices.refresh.RefreshResponse;

@ -30,34 +29,24 @@ import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.bulk.Retry;
import org.elasticsearch.action.index.IndexResponse;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.client.ParentTaskAssigningClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure;
import org.elasticsearch.search.sort.SortBuilder;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

import static java.lang.Math.max;
import static java.lang.Math.min;
@ -74,46 +63,57 @@ import static org.elasticsearch.search.sort.SortBuilders.fieldSort;
 * their tests can use them. Most methods run in the listener thread pool because they are meant to be fast and don't expect to block.
 */
public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBulkByScrollRequest<Request>> {
    protected final ESLogger logger;
    protected final BulkByScrollTask task;
    protected final ThreadPool threadPool;
    /**
     * The request for this action. Named mainRequest because we create lots of <code>request</code> variables all representing child
     * requests of this mainRequest.
     */
    protected final Request mainRequest;
    protected final BulkByScrollTask task;

    private final AtomicLong startTime = new AtomicLong(-1);
    private final AtomicReference<String> scroll = new AtomicReference<>();
    private final Set<String> destinationIndices = Collections.newSetFromMap(new ConcurrentHashMap<>());

    private final ESLogger logger;
    private final ParentTaskAssigningClient client;
    private final ThreadPool threadPool;
    private final SearchRequest firstSearchRequest;
    private final ActionListener<BulkIndexByScrollResponse> listener;
    private final BackoffPolicy backoffPolicy;
    private final Retry bulkRetry;
    private final ScrollableHitSource scrollSource;

    public AbstractAsyncBulkByScrollAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client,
            ThreadPool threadPool, Request mainRequest, SearchRequest firstSearchRequest,
            ActionListener<BulkIndexByScrollResponse> listener) {
            ThreadPool threadPool, Request mainRequest, ActionListener<BulkIndexByScrollResponse> listener) {
        this.task = task;
        this.logger = logger;
        this.client = client;
        this.threadPool = threadPool;
        this.mainRequest = mainRequest;
        this.firstSearchRequest = firstSearchRequest;
        this.listener = listener;
        backoffPolicy = buildBackoffPolicy();
        bulkRetry = Retry.on(EsRejectedExecutionException.class).policy(wrapBackoffPolicy(backoffPolicy));
        BackoffPolicy backoffPolicy = buildBackoffPolicy();
        bulkRetry = Retry.on(EsRejectedExecutionException.class).policy(BackoffPolicy.wrap(backoffPolicy, task::countBulkRetry));
        scrollSource = buildScrollableResultSource(backoffPolicy);
        /*
         * Default to sorting by doc. We can't do this in the request itself because it is normal to *add* to the sorts rather than replace
         * them and if we add _doc as the first sort by default then sorts will never work.... So we add it here, only if there isn't
         * another sort.
         */
        List<SortBuilder<?>> sorts = mainRequest.getSearchRequest().source().sorts();
        if (sorts == null || sorts.isEmpty()) {
            mainRequest.getSearchRequest().source().sort(fieldSort("_doc"));
        }
    }

    protected abstract BulkRequest buildBulk(Iterable<SearchHit> docs);
    protected abstract BulkRequest buildBulk(Iterable<? extends ScrollableHitSource.Hit> docs);

    protected ScrollableHitSource buildScrollableResultSource(BackoffPolicy backoffPolicy) {
        return new ClientScrollableHitSource(logger, backoffPolicy, threadPool, task::countSearchRetry, this::finishHim, client,
                mainRequest.getSearchRequest());
    }

    /**
     * Build the response for reindex actions.
     */
    protected BulkIndexByScrollResponse buildResponse(TimeValue took, List<BulkItemResponse.Failure> indexingFailures,
            List<ShardSearchFailure> searchFailures, boolean timedOut) {
            List<SearchFailure> searchFailures, boolean timedOut) {
        return new BulkIndexByScrollResponse(took, task.getStatus(), indexingFailures, searchFailures, timedOut);
    }
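The constructor above counts bulk retries by wrapping the backoff policy (`BackoffPolicy.wrap(backoffPolicy, task::countBulkRetry)`) rather than subclassing it. A minimal, self-contained sketch of that decorator pattern, using a stand-in `Backoff` type instead of the real Elasticsearch classes:

```java
import java.util.Arrays;
import java.util.Iterator;
import java.util.concurrent.atomic.AtomicInteger;

public class BackoffCounterSketch {
    // Stand-in for BackoffPolicy: an Iterable of delays in milliseconds.
    interface Backoff extends Iterable<Long> {}

    // Wrap a delegate policy so every delay consumed also bumps a counter,
    // mirroring what the wrapped policy does for task.countBulkRetry().
    static Backoff countingWrap(Backoff delegate, Runnable onBackoff) {
        return () -> new Iterator<Long>() {
            private final Iterator<Long> it = delegate.iterator();
            @Override public boolean hasNext() { return it.hasNext(); }
            @Override public Long next() {
                onBackoff.run(); // count the retry before handing out the delay
                return it.next();
            }
        };
    }

    public static void main(String[] args) {
        Backoff exponential = () -> Arrays.asList(50L, 100L, 200L).iterator();
        AtomicInteger retries = new AtomicInteger();
        Backoff counted = countingWrap(exponential, retries::incrementAndGet);
        for (long delayMillis : counted) {
            System.out.println("would back off " + delayMillis + "ms");
        }
        System.out.println("retries counted: " + retries.get()); // 3
    }
}
```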
@ -126,50 +126,33 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
            return;
        }
        try {
            // Default to sorting by _doc if it hasn't been changed.
            if (firstSearchRequest.source().sorts() == null) {
                firstSearchRequest.source().sort(fieldSort("_doc"));
            }
            startTime.set(System.nanoTime());
            if (logger.isDebugEnabled()) {
                logger.debug("executing initial scroll against {}{}",
                        firstSearchRequest.indices() == null || firstSearchRequest.indices().length == 0 ? "all indices"
                                : firstSearchRequest.indices(),
                        firstSearchRequest.types() == null || firstSearchRequest.types().length == 0 ? ""
                                : firstSearchRequest.types());
            }
            scrollSource.start(response -> onScrollResponse(timeValueNanos(System.nanoTime()), 0, response));
        } catch (Exception e) {
            finishHim(e);
            return;
        }
        searchWithRetry(listener -> client.search(firstSearchRequest, listener), (SearchResponse response) -> {
            logger.debug("[{}] documents match query", response.getHits().getTotalHits());
            onScrollResponse(timeValueNanos(System.nanoTime()), 0, response);
        });
    }

    /**
     * Process a scroll response.
     * @param lastBatchStartTime the time when the last batch started. Used to calculate the throttling delay.
     * @param lastBatchSize the size of the last batch. Used to calculate the throttling delay.
     * @param searchResponse the scroll response to process
     * @param response the scroll response to process
     */
    void onScrollResponse(TimeValue lastBatchStartTime, int lastBatchSize, SearchResponse searchResponse) {
    void onScrollResponse(TimeValue lastBatchStartTime, int lastBatchSize, ScrollableHitSource.Response response) {
        if (task.isCancelled()) {
            finishHim(null);
            return;
        }
        setScroll(searchResponse.getScrollId());
        if ( // If any of the shards failed that should abort the request.
                (searchResponse.getShardFailures() != null && searchResponse.getShardFailures().length > 0)
                (response.getFailures().size() > 0)
                // Timeouts aren't shard failures but we still need to pass them back to the user.
                || searchResponse.isTimedOut()
                || response.isTimedOut()
                ) {
            startNormalTermination(emptyList(), unmodifiableList(Arrays.asList(searchResponse.getShardFailures())),
                    searchResponse.isTimedOut());
            refreshAndFinish(emptyList(), response.getFailures(), response.isTimedOut());
            return;
        }
        long total = searchResponse.getHits().totalHits();
        long total = response.getTotalHits();
        if (mainRequest.getSize() > 0) {
            total = min(total, mainRequest.getSize());
        }

@ -181,7 +164,7 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
         * It is important that the batch start time be calculated from here, scroll response to scroll response. That way the time
         * waiting on the scroll doesn't count against this batch in the throttle.
         */
        prepareBulkRequest(timeValueNanos(System.nanoTime()), searchResponse);
        prepareBulkRequest(timeValueNanos(System.nanoTime()), response);
    }

    @Override

@ -198,27 +181,25 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
     * delay has been slept. Uses the generic thread pool because reindex is rare enough not to need its own thread pool and because the
     * thread may be blocked by the user script.
     */
    void prepareBulkRequest(TimeValue thisBatchStartTime, SearchResponse searchResponse) {
    void prepareBulkRequest(TimeValue thisBatchStartTime, ScrollableHitSource.Response response) {
        if (task.isCancelled()) {
            finishHim(null);
            return;
        }
        SearchHit[] docs = searchResponse.getHits().getHits();
        logger.debug("scroll returned [{}] documents with a scroll id of [{}]", docs.length, searchResponse.getScrollId());
        if (docs.length == 0) {
            startNormalTermination(emptyList(), emptyList(), false);
        if (response.getHits().isEmpty()) {
            refreshAndFinish(emptyList(), emptyList(), false);
            return;
        }
        task.countBatch();
        List<SearchHit> docsIterable = Arrays.asList(docs);
        List<? extends ScrollableHitSource.Hit> hits = response.getHits();
        if (mainRequest.getSize() != SIZE_ALL_MATCHES) {
            // Truncate the docs if we have more than the request size
            // Truncate the hits if we have more than the request size
            long remaining = max(0, mainRequest.getSize() - task.getSuccessfullyProcessed());
            if (remaining < docs.length) {
                docsIterable = docsIterable.subList(0, (int) remaining);
            if (remaining < hits.size()) {
                hits = hits.subList(0, (int) remaining);
            }
        }
        BulkRequest request = buildBulk(docsIterable);
        BulkRequest request = buildBulk(hits);
        if (request.requests().isEmpty()) {
            /*
             * If we noop-ed the entire batch then just skip to the next batch or the BulkRequest would fail validation.
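The truncation above caps each batch so the total number of processed documents never exceeds the requested size. A tiny self-contained sketch of the same arithmetic (the numbers are hypothetical; in the real request `SIZE_ALL_MATCHES` marks the unlimited case):

```java
import java.util.Arrays;
import java.util.List;

public class TruncateBatchSketch {
    // Mirrors the logic above: never hand buildBulk more hits than remain in the size budget.
    static <T> List<T> truncate(List<T> hits, int requestedSize, long successfullyProcessed) {
        long remaining = Math.max(0, requestedSize - successfullyProcessed);
        return remaining < hits.size() ? hits.subList(0, (int) remaining) : hits;
    }

    public static void main(String[] args) {
        List<String> batch = Arrays.asList("a", "b", "c", "d", "e");
        // 12 docs requested, 10 already processed: only 2 of this 5-hit batch survive.
        System.out.println(truncate(batch, 12, 10)); // [a, b]
        System.out.println(truncate(batch, 12, 12)); // []
    }
}
```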
@ -297,13 +278,13 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
        addDestinationIndices(destinationIndicesThisBatch);

        if (false == failures.isEmpty()) {
            startNormalTermination(unmodifiableList(failures), emptyList(), false);
            refreshAndFinish(unmodifiableList(failures), emptyList(), false);
            return;
        }

        if (mainRequest.getSize() != SIZE_ALL_MATCHES && task.getSuccessfullyProcessed() >= mainRequest.getSize()) {
            // We've processed all the requested docs.
            startNormalTermination(emptyList(), emptyList(), false);
            refreshAndFinish(emptyList(), emptyList(), false);
            return;
        }

@ -324,11 +305,8 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
            finishHim(null);
            return;
        }
        SearchScrollRequest request = new SearchScrollRequest();
        // Add the wait time into the scroll timeout so it won't timeout while we wait for throttling
        request.scrollId(scroll.get()).scroll(timeValueNanos(
                firstSearchRequest.scroll().keepAlive().nanos() + task.throttleWaitTime(lastBatchStartTime, lastBatchSize).nanos()));
        searchWithRetry(listener -> client.searchScroll(request, listener), (SearchResponse response) -> {
        TimeValue extraKeepAlive = task.throttleWaitTime(lastBatchStartTime, lastBatchSize);
        scrollSource.startNextScroll(extraKeepAlive, response -> {
            onScrollResponse(lastBatchStartTime, lastBatchSize, response);
        });
    }
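The extra keep-alive handed to `startNextScroll` is the expected throttle wait, so the scroll context outlives the pause between batches. A sketch of that arithmetic with illustrative numbers (not the real task math):

```java
import java.util.concurrent.TimeUnit;

public class ScrollKeepAliveSketch {
    // If the configured keep-alive is 5 minutes and throttling forces a 90 second pause
    // before the next batch, request the scroll with 5m + 90s so it can't expire mid-wait.
    static long extendedKeepAliveNanos(long keepAliveNanos, long throttleWaitNanos) {
        return keepAliveNanos + throttleWaitNanos;
    }

    public static void main(String[] args) {
        long keepAlive = TimeUnit.MINUTES.toNanos(5);
        long throttleWait = TimeUnit.SECONDS.toNanos(90);
        long total = extendedKeepAliveNanos(keepAlive, throttleWait);
        System.out.println(TimeUnit.NANOSECONDS.toSeconds(total) + "s"); // 390s
    }
}
```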
@ -344,9 +322,10 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
    }

    /**
     * Start terminating a request that finished non-catastrophically.
     * Start terminating a request that finished non-catastrophically by refreshing the modified indices and then proceeding to
     * {@link #finishHim(Exception, List, List, boolean)}.
     */
    void startNormalTermination(List<Failure> indexingFailures, List<ShardSearchFailure> searchFailures, boolean timedOut) {
    void refreshAndFinish(List<Failure> indexingFailures, List<SearchFailure> searchFailures, boolean timedOut) {
        if (task.isCancelled() || false == mainRequest.isRefresh() || destinationIndices.isEmpty()) {
            finishHim(null, indexingFailures, searchFailures, timedOut);
            return;

@ -377,36 +356,13 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu

    /**
     * Finish the request.
     * @param failure if non null then the request failed catastrophically with this exception
     * @param indexingFailures any indexing failures accumulated during the request
     * @param searchFailures any search failures accumulated during the request
     * @param timedOut have any of the sub-requests timed out?
     */
    void finishHim(Exception failure, List<Failure> indexingFailures, List<ShardSearchFailure> searchFailures, boolean timedOut) {
        String scrollId = scroll.get();
        if (Strings.hasLength(scrollId)) {
            /*
             * Fire off the clear scroll but don't wait for it to return before
             * we send the user their response.
             */
            ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
            clearScrollRequest.addScrollId(scrollId);
            /*
             * Unwrap the client so we don't set our task as the parent. If we *did* set our ID then the clear scroll would be cancelled as
             * if this task is cancelled. But we want to clear the scroll regardless of whether or not the main request was cancelled.
             */
            client.unwrap().clearScroll(clearScrollRequest, new ActionListener<ClearScrollResponse>() {
                @Override
                public void onResponse(ClearScrollResponse response) {
                    logger.debug("Freed [{}] contexts", response.getNumFreed());
                }

                @Override
                public void onFailure(Exception e) {
                    logger.warn("Failed to clear scroll [{}]", e, scrollId);
                }
            });
        }
    void finishHim(Exception failure, List<Failure> indexingFailures, List<SearchFailure> searchFailures, boolean timedOut) {
        scrollSource.close();
        if (failure == null) {
            listener.onResponse(
                    buildResponse(timeValueNanos(System.nanoTime() - startTime.get()), indexingFailures, searchFailures, timedOut));

@ -434,75 +390,6 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
     * Set the last returned scrollId. Exists entirely for testing.
     */
    void setScroll(String scroll) {
        this.scroll.set(scroll);
    }

    /**
     * Wraps a backoffPolicy in another policy that counts the number of backoffs acquired. Used to count bulk backoffs.
     */
    private BackoffPolicy wrapBackoffPolicy(BackoffPolicy backoffPolicy) {
        return new BackoffPolicy() {
            @Override
            public Iterator<TimeValue> iterator() {
                return new Iterator<TimeValue>() {
                    private final Iterator<TimeValue> delegate = backoffPolicy.iterator();
                    @Override
                    public boolean hasNext() {
                        return delegate.hasNext();
                    }

                    @Override
                    public TimeValue next() {
                        if (false == delegate.hasNext()) {
                            return null;
                        }
                        task.countBulkRetry();
                        return delegate.next();
                    }
                };
            }
        };
    }

    /**
     * Run a search action and call onResponse when the response comes in, retrying if the action fails with an exception caused by
     * rejected execution.
     *
     * @param action consumes a listener and starts the action. The listener it consumes is rigged to retry on failure.
     * @param onResponse consumes the response from the action
     */
    private <T> void searchWithRetry(Consumer<ActionListener<T>> action, Consumer<T> onResponse) {
        class RetryHelper extends AbstractRunnable implements ActionListener<T> {
            private final Iterator<TimeValue> retries = backoffPolicy.iterator();

            @Override
            public void onResponse(T response) {
                onResponse.accept(response);
            }

            @Override
            protected void doRun() throws Exception {
                action.accept(this);
            }

            @Override
            public void onFailure(Exception e) {
                if (ExceptionsHelper.unwrap(e, EsRejectedExecutionException.class) != null) {
                    if (retries.hasNext()) {
                        TimeValue delay = retries.next();
                        logger.trace("retrying rejected search after [{}]", e, delay);
                        threadPool.schedule(delay, ThreadPool.Names.SAME, this);
                        task.countSearchRetry();
                    } else {
                        logger.warn("giving up on search because we retried {} times without success", e, retries);
                        finishHim(e);
                    }
                } else {
                    logger.warn("giving up on search because it failed with a non-retryable exception", e);
                    finishHim(e);
                }
            }
        }
        new RetryHelper().run();
        scrollSource.setScroll(scroll);
    }
}
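The removed `searchWithRetry` (its replacement lives in `ClientScrollableHitSource`) reschedules the failed action after each delay handed out by the backoff iterator and gives up once the iterator runs dry or the failure isn't a rejection. A self-contained sketch of that pattern, with a plain `ScheduledExecutorService` standing in for the Elasticsearch thread pool:

```java
import java.util.Arrays;
import java.util.Iterator;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

public class RetryOnRejectionSketch {
    /** An async action that reports its outcome through callbacks. */
    interface Action {
        void run(Runnable onSuccess, Consumer<Exception> onFailure);
    }

    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private final Iterator<Long> backoffMillis = Arrays.asList(50L, 100L, 200L).iterator();

    void runWithRetry(Action action) {
        action.run(
                () -> {
                    System.out.println("succeeded");
                    scheduler.shutdown();
                },
                e -> {
                    // Retry only rejections, and only while the backoff iterator has delays left.
                    if (e instanceof RejectedExecutionException && backoffMillis.hasNext()) {
                        long delay = backoffMillis.next();
                        System.out.println("rejected; retrying in " + delay + "ms");
                        scheduler.schedule(() -> runWithRetry(action), delay, TimeUnit.MILLISECONDS);
                    } else {
                        System.out.println("giving up: " + e);
                        scheduler.shutdown();
                    }
                });
    }

    public static void main(String[] args) {
        RetryOnRejectionSketch sketch = new RetryOnRejectionSketch();
        AtomicInteger attempts = new AtomicInteger();
        // Fail twice with a rejection, then succeed, to exercise the backoff path.
        sketch.runWithRetry((onSuccess, onFailure) -> {
            if (attempts.incrementAndGet() <= 2) {
                onFailure.accept(new RejectedExecutionException("bulk queue full"));
            } else {
                onSuccess.run();
            }
        });
    }
}
```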
@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.ParentTaskAssigningClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.logging.ESLogger;

@ -44,8 +43,6 @@ import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptContext;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.Arrays;

@ -72,13 +69,13 @@ public abstract class AbstractAsyncBulkIndexByScrollAction<Request extends Abstr
     * from copying search hit metadata (parent, routing, etc) to potentially transforming the
     * {@link RequestWrapper} completely.
     */
    private final BiFunction<RequestWrapper<?>, SearchHit, RequestWrapper<?>> scriptApplier;
    private final BiFunction<RequestWrapper<?>, ScrollableHitSource.Hit, RequestWrapper<?>> scriptApplier;

    public AbstractAsyncBulkIndexByScrollAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client,
            ThreadPool threadPool, Request mainRequest, SearchRequest firstSearchRequest,
            ThreadPool threadPool, Request mainRequest,
            ActionListener<BulkIndexByScrollResponse> listener,
            ScriptService scriptService, ClusterState clusterState) {
        super(task, logger, client, threadPool, mainRequest, firstSearchRequest, listener);
        super(task, logger, client, threadPool, mainRequest, listener);
        this.scriptService = scriptService;
        this.clusterState = clusterState;
        this.scriptApplier = Objects.requireNonNull(buildScriptApplier(), "script applier must not be null");

@ -87,15 +84,15 @@ public abstract class AbstractAsyncBulkIndexByScrollAction<Request extends Abstr
    /**
     * Build the {@link BiFunction} to apply to all {@link RequestWrapper}.
     */
    protected BiFunction<RequestWrapper<?>, SearchHit, RequestWrapper<?>> buildScriptApplier() {
    protected BiFunction<RequestWrapper<?>, ScrollableHitSource.Hit, RequestWrapper<?>> buildScriptApplier() {
        // The default script applier executes a no-op
        return (request, searchHit) -> request;
    }

    @Override
    protected BulkRequest buildBulk(Iterable<SearchHit> docs) {
    protected BulkRequest buildBulk(Iterable<? extends ScrollableHitSource.Hit> docs) {
        BulkRequest bulkRequest = new BulkRequest();
        for (SearchHit doc : docs) {
        for (ScrollableHitSource.Hit doc : docs) {
            if (accept(doc)) {
                RequestWrapper<?> request = scriptApplier.apply(copyMetadata(buildRequest(doc), doc), doc);
                if (request != null) {

@ -111,14 +108,14 @@ public abstract class AbstractAsyncBulkIndexByScrollAction<Request extends Abstr
     * from the bulk request. It is also where we fail on invalid search hits, like
     * when the document has no source but it's required.
     */
    protected boolean accept(SearchHit doc) {
        if (doc.hasSource()) {
    protected boolean accept(ScrollableHitSource.Hit doc) {
        if (doc.getSource() == null) {
            /*
             * Either the document didn't store _source or we didn't fetch it for some reason. Since we don't allow the user to
             * change the "fields" part of the search request it is unlikely that we got here because we didn't fetch _source.
             * Thus the error message assumes that it wasn't stored.
             */
            throw new IllegalArgumentException("[" + doc.index() + "][" + doc.type() + "][" + doc.id() + "] didn't store _source");
            throw new IllegalArgumentException("[" + doc.getIndex() + "][" + doc.getType() + "][" + doc.getId() + "] didn't store _source");
        }
        return true;
    }
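`buildBulk` above runs every hit through `accept` before scripting and indexing, rejecting hits whose `_source` wasn't stored. A sketch of that flow with stand-in types (the `Hit` class here is illustrative, not the real `ScrollableHitSource.Hit`):

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;

public class BuildBulkSketch {
    /** Stand-in for a scroll hit; source is null when the document didn't store _source. */
    static class Hit {
        final String index, type, id;
        final Map<String, Object> source;
        Hit(String index, String type, String id, Map<String, Object> source) {
            this.index = index; this.type = type; this.id = id; this.source = source;
        }
    }

    static boolean accept(Hit doc) {
        if (doc.source == null) {
            throw new IllegalArgumentException(
                    "[" + doc.index + "][" + doc.type + "][" + doc.id + "] didn't store _source");
        }
        return true;
    }

    public static void main(String[] args) {
        List<Hit> hits = Arrays.asList(
                new Hit("source", "doc", "1", Collections.<String, Object>singletonMap("foo", "bar")));
        List<Hit> accepted = new ArrayList<>();
        for (Hit hit : hits) {
            if (accept(hit)) {
                accepted.add(hit); // the real code builds one index request per accepted hit
            }
        }
        System.out.println(accepted.size() + " hit(s) accepted");
    }
}
```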
@ -128,34 +125,27 @@ public abstract class AbstractAsyncBulkIndexByScrollAction<Request extends Abstr
|
|||
     * metadata or scripting. That will be handled by copyMetadata and
     * apply functions that can be overridden.
     */
    protected abstract RequestWrapper<?> buildRequest(SearchHit doc);
    protected abstract RequestWrapper<?> buildRequest(ScrollableHitSource.Hit doc);

    /**
     * Copies the metadata from a hit to the request.
     */
    protected RequestWrapper<?> copyMetadata(RequestWrapper<?> request, SearchHit doc) {
        copyParent(request, fieldValue(doc, ParentFieldMapper.NAME));
        copyRouting(request, fieldValue(doc, RoutingFieldMapper.NAME));
    protected RequestWrapper<?> copyMetadata(RequestWrapper<?> request, ScrollableHitSource.Hit doc) {
        request.setParent(doc.getParent());
        copyRouting(request, doc.getRouting());

        // Comes back as a Long but needs to be a string
        Long timestamp = fieldValue(doc, TimestampFieldMapper.NAME);
        Long timestamp = doc.getTimestamp();
        if (timestamp != null) {
            request.setTimestamp(timestamp.toString());
        }
        Long ttl = fieldValue(doc, TTLFieldMapper.NAME);
        Long ttl = doc.getTTL();
        if (ttl != null) {
            request.setTtl(ttl);
        }
        return request;
    }

    /**
     * Copy the parent from a search hit to the request.
     */
    protected void copyParent(RequestWrapper<?> request, String parent) {
        request.setParent(parent);
    }

    /**
     * Copy the routing from a search hit to the request.
     */

@ -163,11 +153,6 @@ public abstract class AbstractAsyncBulkIndexByScrollAction<Request extends Abstr
        request.setRouting(routing);
    }

    protected <T> T fieldValue(SearchHit doc, String fieldName) {
        SearchHitField field = doc.field(fieldName);
        return field == null ? null : field.value();
    }

    /**
     * Wrapper for the {@link ActionRequest}s that are used in this action class.
     */

@ -435,7 +420,7 @@ public abstract class AbstractAsyncBulkIndexByScrollAction<Request extends Abstr
    /**
     * Apply a {@link Script} to a {@link RequestWrapper}
     */
    public abstract class ScriptApplier implements BiFunction<RequestWrapper<?>, SearchHit, RequestWrapper<?>> {
    public abstract class ScriptApplier implements BiFunction<RequestWrapper<?>, ScrollableHitSource.Hit, RequestWrapper<?>> {

        private final BulkByScrollTask task;
        private final ScriptService scriptService;

@ -455,7 +440,7 @@ public abstract class AbstractAsyncBulkIndexByScrollAction<Request extends Abstr

        @Override
        @SuppressWarnings("unchecked")
        public RequestWrapper<?> apply(RequestWrapper<?> request, SearchHit doc) {
        public RequestWrapper<?> apply(RequestWrapper<?> request, ScrollableHitSource.Hit doc) {
            if (script == null) {
                return request;
            }

@ -467,18 +452,18 @@ public abstract class AbstractAsyncBulkIndexByScrollAction<Request extends Abstr
                context = new HashMap<>();
            }

            context.put(IndexFieldMapper.NAME, doc.index());
            context.put(TypeFieldMapper.NAME, doc.type());
            context.put(IdFieldMapper.NAME, doc.id());
            context.put(IndexFieldMapper.NAME, doc.getIndex());
            context.put(TypeFieldMapper.NAME, doc.getType());
            context.put(IdFieldMapper.NAME, doc.getId());
            Long oldVersion = doc.getVersion();
            context.put(VersionFieldMapper.NAME, oldVersion);
            String oldParent = fieldValue(doc, ParentFieldMapper.NAME);
            String oldParent = doc.getParent();
            context.put(ParentFieldMapper.NAME, oldParent);
            String oldRouting = fieldValue(doc, RoutingFieldMapper.NAME);
            String oldRouting = doc.getRouting();
            context.put(RoutingFieldMapper.NAME, oldRouting);
            Long oldTimestamp = fieldValue(doc, TimestampFieldMapper.NAME);
            Long oldTimestamp = doc.getTimestamp();
            context.put(TimestampFieldMapper.NAME, oldTimestamp);
            Long oldTTL = fieldValue(doc, TTLFieldMapper.NAME);
            Long oldTTL = doc.getTTL();
            context.put(TTLFieldMapper.NAME, oldTTL);
            context.put(SourceFieldMapper.NAME, request.getSource());

@ -501,15 +486,15 @@ public abstract class AbstractAsyncBulkIndexByScrollAction<Request extends Abstr
            request.setSource((Map<String, Object>) resultCtx.remove(SourceFieldMapper.NAME));

            Object newValue = context.remove(IndexFieldMapper.NAME);
            if (false == doc.index().equals(newValue)) {
            if (false == doc.getIndex().equals(newValue)) {
                scriptChangedIndex(request, newValue);
            }
            newValue = context.remove(TypeFieldMapper.NAME);
            if (false == doc.type().equals(newValue)) {
            if (false == doc.getType().equals(newValue)) {
                scriptChangedType(request, newValue);
            }
            newValue = context.remove(IdFieldMapper.NAME);
            if (false == doc.id().equals(newValue)) {
            if (false == doc.getId().equals(newValue)) {
                scriptChangedId(request, newValue);
            }
            newValue = context.remove(VersionFieldMapper.NAME);
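
The hunks above swap `SearchHit` for the new `ScrollableHitSource.Hit` abstraction so metadata can come from either a local search or, later, a remote response. A minimal self-contained sketch of that shape — stand-in types invented for illustration, not the actual Elasticsearch classes:

```
import java.util.Map;

// Stand-in for ScrollableHitSource.Hit: metadata exposed through plain getters.
interface Hit {
    String getParent();
    String getRouting();
    Long getTimestamp();
    Long getTTL();
}

class CopyMetadataSketch {
    // Mirrors copyMetadata above: pull each piece of metadata off the hit and onto the request.
    static Map<String, Object> copyMetadata(Map<String, Object> request, Hit doc) {
        request.put("parent", doc.getParent());
        request.put("routing", doc.getRouting());
        Long timestamp = doc.getTimestamp();
        if (timestamp != null) {
            request.put("timestamp", timestamp.toString()); // comes back as a Long but needs to be a string
        }
        Long ttl = doc.getTTL();
        if (ttl != null) {
            request.put("ttl", ttl);
        }
        return request;
    }
}
```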

@ -74,7 +74,7 @@ public abstract class AbstractBulkIndexByScrollRequest<Self extends AbstractBulk
    protected void searchToString(StringBuilder b) {
        super.searchToString(b);
        if (script != null) {
            b.append(" updated with [").append(script).append(']');
            b.append(" updated with ").append(script);
        }
    }
}

@ -21,21 +21,18 @@ package org.elasticsearch.index.reindex;

import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import static java.lang.Math.min;
import static java.util.Collections.unmodifiableList;
import static java.util.Objects.requireNonNull;
import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearchFailure;

/**
 * Response used for actions that index many documents using a scroll request.

@ -43,18 +40,18 @@ import static org.elasticsearch.action.search.ShardSearchFailure.readShardSearch
public class BulkIndexByScrollResponse extends ActionResponse implements ToXContent {
    private TimeValue took;
    private BulkByScrollTask.Status status;
    private List<Failure> indexingFailures;
    private List<ShardSearchFailure> searchFailures;
    private List<Failure> bulkFailures;
    private List<SearchFailure> searchFailures;
    private boolean timedOut;

    public BulkIndexByScrollResponse() {
    }

    public BulkIndexByScrollResponse(TimeValue took, BulkByScrollTask.Status status, List<Failure> indexingFailures,
            List<ShardSearchFailure> searchFailures, boolean timedOut) {
    public BulkIndexByScrollResponse(TimeValue took, BulkByScrollTask.Status status, List<Failure> bulkFailures,
            List<SearchFailure> searchFailures, boolean timedOut) {
        this.took = took;
        this.status = requireNonNull(status, "Null status not supported");
        this.indexingFailures = indexingFailures;
        this.bulkFailures = bulkFailures;
        this.searchFailures = searchFailures;
        this.timedOut = timedOut;
    }

@ -113,17 +110,16 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont
    }

    /**
     * All of the indexing failures. Version conflicts are only included if the request sets abortOnVersionConflict to true (the
     * default).
     * All of the bulk failures. Version conflicts are only included if the request sets abortOnVersionConflict to true (the default).
     */
    public List<Failure> getIndexingFailures() {
        return indexingFailures;
    public List<Failure> getBulkFailures() {
        return bulkFailures;
    }

    /**
     * All search failures.
     */
    public List<ShardSearchFailure> getSearchFailures() {
    public List<SearchFailure> getSearchFailures() {
        return searchFailures;
    }

@ -139,14 +135,8 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont
        super.writeTo(out);
        took.writeTo(out);
        status.writeTo(out);
        out.writeVInt(indexingFailures.size());
        for (Failure failure: indexingFailures) {
            failure.writeTo(out);
        }
        out.writeVInt(searchFailures.size());
        for (ShardSearchFailure failure: searchFailures) {
            failure.writeTo(out);
        }
        out.writeList(bulkFailures);
        out.writeList(searchFailures);
        out.writeBoolean(timedOut);
    }

@ -155,19 +145,9 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont
        super.readFrom(in);
        took = new TimeValue(in);
        status = new BulkByScrollTask.Status(in);
        int indexingFailuresCount = in.readVInt();
        List<Failure> indexingFailures = new ArrayList<>(indexingFailuresCount);
        for (int i = 0; i < indexingFailuresCount; i++) {
            indexingFailures.add(new Failure(in));
        }
        this.indexingFailures = unmodifiableList(indexingFailures);
        int searchFailuresCount = in.readVInt();
        List<ShardSearchFailure> searchFailures = new ArrayList<>(searchFailuresCount);
        for (int i = 0; i < searchFailuresCount; i++) {
            searchFailures.add(readShardSearchFailure(in));
        }
        this.searchFailures = unmodifiableList(searchFailures);
        this.timedOut = in.readBoolean();
        bulkFailures = in.readList(Failure::new);
        searchFailures = in.readList(SearchFailure::new);
        timedOut = in.readBoolean();
    }

    @Override

@ -176,15 +156,13 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont
        builder.field("timed_out", timedOut);
        status.innerXContent(builder, params);
        builder.startArray("failures");
        for (Failure failure: indexingFailures) {
        for (Failure failure: bulkFailures) {
            builder.startObject();
            failure.toXContent(builder, params);
            builder.endObject();
        }
        for (ShardSearchFailure failure: searchFailures) {
            builder.startObject();
        for (SearchFailure failure: searchFailures) {
            failure.toXContent(builder, params);
            builder.endObject();
        }
        builder.endArray();
        return builder;

@ -197,7 +175,7 @@ public class BulkIndexByScrollResponse extends ActionResponse implements ToXCont
        builder.append("took=").append(took).append(',');
        builder.append("timed_out=").append(timedOut).append(',');
        status.innerToString(builder);
        builder.append(",indexing_failures=").append(getIndexingFailures().subList(0, min(3, getIndexingFailures().size())));
        builder.append(",bulk_failures=").append(getBulkFailures().subList(0, min(3, getBulkFailures().size())));
        builder.append(",search_failures=").append(getSearchFailures().subList(0, min(3, getSearchFailures().size())));
        return builder.append(']').toString();
    }
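
The serialization rework above collapses the hand-rolled count-then-loop encoding into `writeList`/`readList`. The wire pattern itself is just length-prefixed framing; a sketch with plain `java.io` streams (illustrative only, not the Elasticsearch `StreamInput`/`StreamOutput` API):

```
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

class LengthPrefixedListSketch {
    // Write: count first, then each element, matching the old writeVInt + loop shape.
    static void writeList(DataOutputStream out, List<String> list) throws IOException {
        out.writeInt(list.size());
        for (String s : list) {
            out.writeUTF(s);
        }
    }

    // Read: count first, then read back exactly that many elements.
    static List<String> readList(DataInputStream in) throws IOException {
        int count = in.readInt();
        List<String> list = new ArrayList<>(count);
        for (int i = 0; i < count; i++) {
            list.add(in.readUTF());
        }
        return list;
    }
}
```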

@ -21,9 +21,9 @@ package org.elasticsearch.index.reindex;

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestResponse;

@ -61,13 +61,13 @@ public class BulkIndexByScrollResponseContentListener<R extends BulkIndexByScrol
        if (response.isTimedOut()) {
            status = RestStatus.REQUEST_TIMEOUT;
        }
        for (Failure failure : response.getIndexingFailures()) {
        for (Failure failure : response.getBulkFailures()) {
            if (failure.getStatus().getStatus() > status.getStatus()) {
                status = failure.getStatus();
            }
        }
        for (ShardSearchFailure failure: response.getSearchFailures()) {
            RestStatus failureStatus = ExceptionsHelper.status(failure.getCause());
        for (SearchFailure failure: response.getSearchFailures()) {
            RestStatus failureStatus = ExceptionsHelper.status(failure.getReason());
            if (failureStatus.getStatus() > status.getStatus()) {
                status = failureStatus;
            }
        }
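
The listener above walks every bulk and search failure and keeps the most severe HTTP status for the response. The reduction is a plain max over status codes, sketched here with bare ints:

```
import java.util.List;

class WorstStatusSketch {
    // Start from the base status (e.g. 200) and escalate to the highest failure status seen.
    static int worstStatus(int base, List<Integer> failureStatuses) {
        int status = base;
        for (int failureStatus : failureStatuses) {
            if (failureStatus > status) {
                status = failureStatus;
            }
        }
        return status;
    }
}
```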

@ -0,0 +1,251 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ParentTaskAssigningClient;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;
import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.function.Consumer;

import static java.util.Collections.emptyList;
import static java.util.Collections.unmodifiableList;
import static org.elasticsearch.common.unit.TimeValue.timeValueNanos;
import static org.elasticsearch.common.util.CollectionUtils.isEmpty;

/**
 * A scrollable source of hits from a {@linkplain Client} instance.
 */
public class ClientScrollableHitSource extends ScrollableHitSource {
    private final ParentTaskAssigningClient client;
    private final SearchRequest firstSearchRequest;

    public ClientScrollableHitSource(ESLogger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry,
            Consumer<Exception> fail, ParentTaskAssigningClient client, SearchRequest firstSearchRequest) {
        super(logger, backoffPolicy, threadPool, countSearchRetry, fail);
        this.client = client;
        this.firstSearchRequest = firstSearchRequest;
    }

    @Override
    public void doStart(Consumer<? super Response> onResponse) {
        if (logger.isDebugEnabled()) {
            logger.debug("executing initial scroll against {}{}",
                    isEmpty(firstSearchRequest.indices()) ? "all indices" : firstSearchRequest.indices(),
                    isEmpty(firstSearchRequest.types()) ? "" : firstSearchRequest.types());
        }
        searchWithRetry(listener -> client.search(firstSearchRequest, listener), r -> consume(r, onResponse));
    }

    @Override
    protected void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, Consumer<? super Response> onResponse) {
        SearchScrollRequest request = new SearchScrollRequest();
        // Add the wait time into the scroll timeout so it won't time out while we wait for throttling
        request.scrollId(scrollId).scroll(timeValueNanos(firstSearchRequest.scroll().keepAlive().nanos() + extraKeepAlive.nanos()));
        searchWithRetry(listener -> client.searchScroll(request, listener), r -> consume(r, onResponse));
    }

    @Override
    public void clearScroll(String scrollId) {
        /*
         * Fire off the clear scroll but don't wait for it to return before
         * we send the user their response.
         */
        ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
        clearScrollRequest.addScrollId(scrollId);
        /*
         * Unwrap the client so we don't set our task as the parent. If we *did* set our ID then the clear scroll would be cancelled as
         * if this task is cancelled. But we want to clear the scroll regardless of whether or not the main request was cancelled.
         */
        client.unwrap().clearScroll(clearScrollRequest, new ActionListener<ClearScrollResponse>() {
            @Override
            public void onResponse(ClearScrollResponse response) {
                logger.debug("Freed [{}] contexts", response.getNumFreed());
            }

            @Override
            public void onFailure(Exception e) {
                logger.warn("Failed to clear scroll [{}]", e, scrollId);
            }
        });
    }

    /**
     * Run a search action and call onResponse when the response comes in, retrying if the action fails with an exception caused by
     * rejected execution.
     *
     * @param action consumes a listener and starts the action. The listener it consumes is rigged to retry on failure.
     * @param onResponse consumes the response from the action
     */
    private void searchWithRetry(Consumer<ActionListener<SearchResponse>> action, Consumer<SearchResponse> onResponse) {
        /*
         * RetryHelper is both an AbstractRunnable and an ActionListener<SearchResponse> - meaning that it both starts the search and
         * reacts to the results. The complexity is all in onFailure which either adapts the failure to the "fail" listener or
         * retries the search. Since both AbstractRunnable and ActionListener define the onFailure method it is called for either failure
         * to run the action (either while running or before starting) and for failure on the response from the action.
         */
        class RetryHelper extends AbstractRunnable implements ActionListener<SearchResponse> {
            private final Iterator<TimeValue> retries = backoffPolicy.iterator();
            private volatile int retryCount = 0;

            @Override
            protected void doRun() throws Exception {
                action.accept(this);
            }

            @Override
            public void onResponse(SearchResponse response) {
                onResponse.accept(response);
            }

            @Override
            public void onFailure(Exception e) {
                if (ExceptionsHelper.unwrap(e, EsRejectedExecutionException.class) != null) {
                    if (retries.hasNext()) {
                        retryCount += 1;
                        TimeValue delay = retries.next();
                        logger.trace("retrying rejected search after [{}]", e, delay);
                        countSearchRetry.run();
                        threadPool.schedule(delay, ThreadPool.Names.SAME, this);
                    } else {
                        logger.warn("giving up on search because we retried [{}] times without success", e, retryCount);
                        fail.accept(e);
                    }
                } else {
                    logger.warn("giving up on search because it failed with a non-retryable exception", e);
                    fail.accept(e);
                }
            }
        }
        new RetryHelper().run();
    }

    private void consume(SearchResponse response, Consumer<? super Response> onResponse) {
        onResponse.accept(wrap(response));
    }

    private Response wrap(SearchResponse response) {
        List<SearchFailure> failures;
        if (response.getShardFailures() == null) {
            failures = emptyList();
        } else {
            failures = new ArrayList<>(response.getShardFailures().length);
            for (ShardSearchFailure failure: response.getShardFailures()) {
                String nodeId = failure.shard() == null ? null : failure.shard().nodeId();
                failures.add(new SearchFailure(failure.getCause(), failure.index(), failure.shardId(), nodeId));
            }
        }
        List<Hit> hits;
        if (response.getHits().getHits() == null || response.getHits().getHits().length == 0) {
            hits = emptyList();
        } else {
            hits = new ArrayList<>(response.getHits().getHits().length);
            for (SearchHit hit: response.getHits().getHits()) {
                hits.add(new ClientHit(hit));
            }
            hits = unmodifiableList(hits);
        }
        return new Response(response.isTimedOut(), failures, response.getHits().getTotalHits(),
                hits, response.getScrollId());
    }

    private static class ClientHit implements Hit {
        private final SearchHit delegate;
        private final BytesReference source;

        public ClientHit(SearchHit delegate) {
            this.delegate = delegate;
            source = delegate.hasSource() ? null : delegate.getSourceRef();
        }

        @Override
        public String getIndex() {
            return delegate.getIndex();
        }

        @Override
        public String getType() {
            return delegate.getType();
        }

        @Override
        public String getId() {
            return delegate.getId();
        }

        @Override
        public BytesReference getSource() {
            return source;
        }

        @Override
        public long getVersion() {
            return delegate.getVersion();
        }

        @Override
        public String getParent() {
            return fieldValue(ParentFieldMapper.NAME);
        }

        @Override
        public String getRouting() {
            return fieldValue(RoutingFieldMapper.NAME);
        }

        @Override
        public Long getTimestamp() {
            return fieldValue(TimestampFieldMapper.NAME);
        }

        @Override
        public Long getTTL() {
            return fieldValue(TTLFieldMapper.NAME);
        }

        private <T> T fieldValue(String fieldName) {
            SearchHitField field = delegate.field(fieldName);
            return field == null ? null : field.value();
        }
    }
}
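
`searchWithRetry` above retries only on rejected-execution failures, pulling each delay from the backoff policy's iterator and rescheduling itself. A self-contained sketch of that retry loop using a `ScheduledExecutorService` (stand-in types and delays; the real code uses the Elasticsearch `ThreadPool` and unwraps `EsRejectedExecutionException`):

```
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

class RetryWithBackoffSketch {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private final Iterator<Long> backoffMillis = List.of(100L, 200L, 400L).iterator(); // stand-in backoff policy

    // Run the action; on a retryable failure, wait for the next backoff delay and try again.
    void runWithRetry(Callable<String> action, Consumer<String> onResponse, Consumer<Exception> fail) {
        scheduler.execute(() -> {
            try {
                onResponse.accept(action.call());
            } catch (Exception e) {
                if (isRetryable(e) && backoffMillis.hasNext()) {
                    scheduler.schedule(() -> runWithRetry(action, onResponse, fail),
                            backoffMillis.next(), TimeUnit.MILLISECONDS);
                } else {
                    fail.accept(e); // give up, matching the "giving up on search" branches above
                }
            }
        });
    }

    private boolean isRetryable(Exception e) {
        return e instanceof RejectedExecutionException;
    }
}
```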

@ -23,12 +23,15 @@ import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.plugins.ActionPlugin;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestHandler;

import java.util.Arrays;
import java.util.List;

import static java.util.Collections.singletonList;

public class ReindexPlugin extends Plugin implements ActionPlugin {
    public static final String NAME = "reindex";

@ -49,4 +52,9 @@ public class ReindexPlugin extends Plugin implements ActionPlugin {
    public void onModule(NetworkModule networkModule) {
        networkModule.registerTaskStatus(BulkByScrollTask.Status.NAME, BulkByScrollTask.Status::new);
    }

    @Override
    public List<Setting<?>> getSettings() {
        return singletonList(TransportReindexAction.REMOTE_CLUSTER_WHITELIST);
    }
}

@ -27,11 +27,13 @@ import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.index.reindex.remote.RemoteInfo;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

import static java.util.Collections.singletonList;
import static java.util.Collections.unmodifiableList;
import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.index.VersionType.INTERNAL;

@ -48,6 +50,8 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequest<ReindexRequ
     */
    private IndexRequest destination;

    private RemoteInfo remoteInfo;

    public ReindexRequest() {
    }

@ -90,6 +94,9 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequest<ReindexRequ
        if (destination.timestamp() != null) {
            e = addValidationError("setting timestamp on destination isn't supported. use scripts instead.", e);
        }
        if (getRemoteInfo() != null && getSearchRequest().source().query() != null) {
            e = addValidationError("reindex from remote sources should use RemoteInfo's query instead of source's query", e);
        }
        return e;
    }

@ -110,23 +117,36 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequest<ReindexRequ
        return destination;
    }

    public void setRemoteInfo(RemoteInfo remoteInfo) {
        this.remoteInfo = remoteInfo;
    }

    public RemoteInfo getRemoteInfo() {
        return remoteInfo;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        destination = new IndexRequest();
        destination.readFrom(in);
        remoteInfo = in.readOptionalWriteable(RemoteInfo::new);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        destination.writeTo(out);
        out.writeOptionalWriteable(remoteInfo);
    }

    @Override
    public String toString() {
        StringBuilder b = new StringBuilder();
        b.append("reindex from ");
        if (remoteInfo != null) {
            b.append('[').append(remoteInfo).append(']');
        }
        searchToString(b);
        b.append(" to [").append(destination.index()).append(']');
        if (destination.type() != null) {

@ -148,6 +168,9 @@ public class ReindexRequest extends AbstractBulkIndexByScrollRequest<ReindexRequ
    public List<? extends IndicesRequest> subRequests() {
        assert getSearchRequest() != null;
        assert getDestination() != null;
        if (remoteInfo != null) {
            return singletonList(getDestination());
        }
        return unmodifiableList(Arrays.asList(getSearchRequest(), getDestination()));
    }
}
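
`readOptionalWriteable`/`writeOptionalWriteable` above encode the optional `remoteInfo` as a presence flag followed by the value, which keeps the wire format backwards compatible. The framing itself, sketched with plain `java.io` streams (not the Elasticsearch stream API):

```
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class OptionalFieldSketch {
    // Write a boolean marker, then the value only if one is present.
    static void writeOptionalString(DataOutputStream out, String value) throws IOException {
        out.writeBoolean(value != null);
        if (value != null) {
            out.writeUTF(value);
        }
    }

    // Read the marker first; only consume a value when the marker says one follows.
    static String readOptionalString(DataInputStream in) throws IOException {
        return in.readBoolean() ? in.readUTF() : null;
    }
}
```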

@ -25,6 +25,7 @@ import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.index.reindex.remote.RemoteInfo;

public class ReindexRequestBuilder extends
        AbstractBulkIndexByScrollRequestBuilder<ReindexRequest, ReindexRequestBuilder> {

@ -67,4 +68,12 @@ public class ReindexRequestBuilder extends
        destination.setIndex(index).setType(type);
        return this;
    }

    /**
     * Setup reindexing from a remote cluster.
     */
    public ReindexRequestBuilder setRemoteInfo(RemoteInfo remoteInfo) {
        request().setRemoteInfo(remoteInfo);
        return this;
    }
}

@ -27,16 +27,21 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.reindex.remote.RemoteInfo;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestController;

@ -48,36 +53,39 @@ import org.elasticsearch.search.suggest.Suggesters;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import static java.util.Objects.requireNonNull;
import static org.elasticsearch.common.unit.TimeValue.parseTimeValue;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.rest.RestRequest.Method.POST;

/**
 * Expose IndexBySearchRequest over rest.
 * Expose reindex over rest.
 */
public class RestReindexAction extends AbstractBaseReindexRestHandler<ReindexRequest, TransportReindexAction> {

    private static final ObjectParser<ReindexRequest, ReindexParseContext> PARSER = new ObjectParser<>("reindex");
    static final ObjectParser<ReindexRequest, ReindexParseContext> PARSER = new ObjectParser<>("reindex");
    private static final Pattern HOST_PATTERN = Pattern.compile("(?<scheme>[^:]+)://(?<host>[^:]+):(?<port>\\d+)");

    static {
        ObjectParser.Parser<SearchRequest, ReindexParseContext> sourceParser = (parser, search, context) -> {
            /*
             * Extract the parameters that we need from the source sent to the parser. We could do away with this hack when search source
             * has an ObjectParser.
             */
        ObjectParser.Parser<ReindexRequest, ReindexParseContext> sourceParser = (parser, request, context) -> {
            // Funky hack to work around Search not having a proper ObjectParser and us wanting to extract query if using remote.
            Map<String, Object> source = parser.map();
            String[] indices = extractStringArray(source, "index");
            if (indices != null) {
                search.indices(indices);
                request.getSearchRequest().indices(indices);
            }
            String[] types = extractStringArray(source, "type");
            if (types != null) {
                search.types(types);
                request.getSearchRequest().types(types);
            }
            request.setRemoteInfo(buildRemoteInfo(source));
            XContentBuilder builder = XContentFactory.contentBuilder(parser.contentType());
            builder.map(source);
            try (XContentParser innerParser = parser.contentType().xContent().createParser(builder.bytes())) {
                search.source().parseXContent(context.queryParseContext(innerParser), context.aggParsers, context.suggesters);
                request.getSearchRequest().source().parseXContent(context.queryParseContext(innerParser), context.aggParsers,
                        context.suggesters);
            }
        };

@ -94,7 +102,7 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler<ReindexReq
        destParser.declareString((i, ttl) -> i.ttl(parseTimeValue(ttl, TimeValue.timeValueMillis(-1), "ttl").millis()),
                new ParseField("ttl"));

        PARSER.declareField((p, v, c) -> sourceParser.parse(p, v.getSearchRequest(), c), new ParseField("source"), ValueType.OBJECT);
        PARSER.declareField((p, v, c) -> sourceParser.parse(p, v, c), new ParseField("source"), ValueType.OBJECT);
        PARSER.declareField((p, v, c) -> destParser.parse(p, v.getDestination(), c), new ParseField("dest"), ValueType.OBJECT);
        PARSER.declareInt(ReindexRequest::setSize, new ParseField("size"));
        PARSER.declareField((p, v, c) -> v.setScript(Script.parse(p, c.getParseFieldMatcher())), new ParseField("script"),

@ -127,6 +135,29 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler<ReindexReq
        return internal;
    }

    static RemoteInfo buildRemoteInfo(Map<String, Object> source) throws IOException {
        @SuppressWarnings("unchecked")
        Map<String, Object> remote = (Map<String, Object>) source.remove("remote");
        if (remote == null) {
            return null;
        }
        String username = extractString(remote, "username");
        String password = extractString(remote, "password");
        String hostInRequest = requireNonNull(extractString(remote, "host"), "[host] must be specified to reindex from a remote cluster");
        Matcher hostMatcher = HOST_PATTERN.matcher(hostInRequest);
        if (false == hostMatcher.matches()) {
            throw new IllegalArgumentException("[host] must be of the form [scheme]://[host]:[port] but was [" + hostInRequest + "]");
        }
        String scheme = hostMatcher.group("scheme");
        String host = hostMatcher.group("host");
        int port = Integer.parseInt(hostMatcher.group("port"));
        if (false == remote.isEmpty()) {
            throw new IllegalArgumentException(
                    "Unsupported fields in [remote]: [" + Strings.collectionToCommaDelimitedString(remote.keySet()) + "]");
        }
        return new RemoteInfo(scheme, host, port, queryForRemote(source), username, password);
    }

    /**
     * Yank a string array from a map. Emulates XContent's permissive String to
     * String array conversions.

@ -147,7 +178,32 @@ public class RestReindexAction extends AbstractBaseReindexRestHandler<ReindexReq
        }
    }

    private class ReindexParseContext implements ParseFieldMatcherSupplier {
    private static String extractString(Map<String, Object> source, String name) {
        Object value = source.remove(name);
        if (value == null) {
            return null;
        }
        if (value instanceof String) {
            return (String) value;
        }
        throw new IllegalArgumentException("Expected [" + name + "] to be a string but was [" + value + "]");
    }

    private static BytesReference queryForRemote(Map<String, Object> source) throws IOException {
        XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint();
        Object query = source.remove("query");
        if (query == null) {
            return matchAllQuery().toXContent(builder, ToXContent.EMPTY_PARAMS).bytes();
        }
        if (!(query instanceof Map)) {
            throw new IllegalArgumentException("Expected [query] to be an object but was [" + query + "]");
        }
        @SuppressWarnings("unchecked")
        Map<String, Object> map = (Map<String, Object>) query;
        return builder.map(map).bytes();
    }

    static class ReindexParseContext implements ParseFieldMatcherSupplier {
        private final IndicesQueriesRegistry indicesQueryRegistry;
        private final ParseFieldMatcher parseFieldMatcher;
        private final AggregatorParsers aggParsers;
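
`buildRemoteInfo` above validates the `host` field against `HOST_PATTERN` and pulls the pieces out through named groups. The same regex, runnable on its own:

```
import java.util.regex.Matcher;
import java.util.regex.Pattern;

class HostPatternSketch {
    private static final Pattern HOST_PATTERN = Pattern.compile("(?<scheme>[^:]+)://(?<host>[^:]+):(?<port>\\d+)");

    public static void main(String[] args) {
        Matcher m = HOST_PATTERN.matcher("http://otherhost:9200");
        if (false == m.matches()) {
            throw new IllegalArgumentException("[host] must be of the form [scheme]://[host]:[port]");
        }
        // Prints: scheme=http host=otherhost port=9200
        System.out.println("scheme=" + m.group("scheme")
                + " host=" + m.group("host")
                + " port=" + Integer.parseInt(m.group("port")));
    }
}
```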

@ -0,0 +1,357 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.reindex.remote.RemoteScrollableHitSource;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;

import static java.util.Objects.requireNonNull;

/**
 * A scrollable source of results.
 */
public abstract class ScrollableHitSource implements Closeable {
    private final AtomicReference<String> scrollId = new AtomicReference<>();

    protected final ESLogger logger;
    protected final BackoffPolicy backoffPolicy;
    protected final ThreadPool threadPool;
    protected final Runnable countSearchRetry;
    protected final Consumer<Exception> fail;

    public ScrollableHitSource(ESLogger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry,
            Consumer<Exception> fail) {
        this.logger = logger;
        this.backoffPolicy = backoffPolicy;
        this.threadPool = threadPool;
        this.countSearchRetry = countSearchRetry;
        this.fail = fail;
    }

    public final void start(Consumer<Response> onResponse) {
        doStart(response -> {
            setScroll(response.getScrollId());
            logger.debug("scroll returned [{}] documents with a scroll id of [{}]", response.getHits().size(), response.getScrollId());
            onResponse.accept(response);
        });
    }
    protected abstract void doStart(Consumer<? super Response> onResponse);

    public final void startNextScroll(TimeValue extraKeepAlive, Consumer<Response> onResponse) {
        doStartNextScroll(scrollId.get(), extraKeepAlive, response -> {
            setScroll(response.getScrollId());
            onResponse.accept(response);
        });
    }
    protected abstract void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, Consumer<? super Response> onResponse);

    @Override
    public void close() {
        String scrollId = this.scrollId.get();
        if (Strings.hasLength(scrollId)) {
            clearScroll(scrollId);
        }
    }
    protected abstract void clearScroll(String scrollId);

    /**
     * Set the id of the last scroll. Used for debugging.
     */
    final void setScroll(String scrollId) {
        this.scrollId.set(scrollId);
    }

    /**
     * Response from each scroll batch.
     */
    public static class Response {
        private final boolean timedOut;
        private final List<SearchFailure> failures;
        private final long totalHits;
        private final List<? extends Hit> hits;
        private final String scrollId;

        public Response(boolean timedOut, List<SearchFailure> failures, long totalHits, List<? extends Hit> hits, String scrollId) {
            this.timedOut = timedOut;
            this.failures = failures;
            this.totalHits = totalHits;
            this.hits = hits;
            this.scrollId = scrollId;
        }

        /**
         * Did this batch time out?
         */
        public boolean isTimedOut() {
            return timedOut;
        }

        /**
         * Were there any search failures?
         */
        public final List<SearchFailure> getFailures() {
            return failures;
        }

        /**
         * What was the total number of documents matching the search?
         */
        public long getTotalHits() {
            return totalHits;
        }

        /**
         * The documents returned in this batch.
         */
        public List<? extends Hit> getHits() {
            return hits;
        }

        /**
         * The scroll id used to fetch the next set of documents.
         */
        public String getScrollId() {
            return scrollId;
        }
    }

    /**
     * A document returned as part of the response. Think of it like {@link SearchHit} but with all the things reindex needs in convenient
     * methods.
     */
    public interface Hit {
        String getIndex();
        String getType();
        String getId();
        long getVersion();
        /**
         * The source of the hit. Returns null if the source didn't come back from the search, usually because the source wasn't stored at
         * all.
         */
        @Nullable BytesReference getSource();
        @Nullable String getParent();
        @Nullable String getRouting();
        @Nullable Long getTimestamp();
        @Nullable Long getTTL();
    }

    /**
     * An implementation of {@linkplain Hit} that uses getters and setters. Primarily used for testing and
     * {@link RemoteScrollableHitSource}.
     */
    public static class BasicHit implements Hit {
        private final String index;
        private final String type;
        private final String id;
        private final long version;

        private BytesReference source;
        private String parent;
        private String routing;
        private Long timestamp;
        private Long ttl;

        public BasicHit(String index, String type, String id, long version) {
            this.index = index;
            this.type = type;
            this.id = id;
            this.version = version;
        }

        @Override
        public String getIndex() {
            return index;
        }

        @Override
        public String getType() {
            return type;
        }

        @Override
        public String getId() {
            return id;
        }

        @Override
        public long getVersion() {
            return version;
        }

        @Override
        public BytesReference getSource() {
            return source;
        }

        public BasicHit setSource(BytesReference source) {
            this.source = source;
            return this;
        }

        @Override
        public String getParent() {
            return parent;
        }

        public BasicHit setParent(String parent) {
            this.parent = parent;
            return this;
        }

        @Override
        public String getRouting() {
            return routing;
        }

        public BasicHit setRouting(String routing) {
            this.routing = routing;
            return this;
        }

        @Override
        public Long getTimestamp() {
            return timestamp;
        }

        public BasicHit setTimestamp(Long timestamp) {
            this.timestamp = timestamp;
            return this;
        }

        @Override
        public Long getTTL() {
            return ttl;
        }

        public BasicHit setTTL(Long ttl) {
            this.ttl = ttl;
            return this;
        }
    }

    /**
     * A failure during search. Like {@link ShardSearchFailure} but useful for reindex from remote as well.
     */
    public static class SearchFailure implements Writeable, ToXContent {
        private final Throwable reason;
        @Nullable
        private final String index;
        @Nullable
        private final Integer shardId;
        @Nullable
        private final String nodeId;

        public SearchFailure(Throwable reason, @Nullable String index, @Nullable Integer shardId, @Nullable String nodeId) {
            this.index = index;
            this.shardId = shardId;
            this.reason = requireNonNull(reason, "reason cannot be null");
            this.nodeId = nodeId;
        }

        /**
         * Build a search failure that doesn't have shard information available.
         */
        public SearchFailure(Throwable reason) {
            this(reason, null, null, null);
        }

        /**
         * Read from a stream.
         */
        public SearchFailure(StreamInput in) throws IOException {
            reason = in.readException();
            index = in.readOptionalString();
            shardId = in.readOptionalVInt();
            nodeId = in.readOptionalString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeException(reason);
            out.writeOptionalString(index);
            out.writeOptionalVInt(shardId);
            out.writeOptionalString(nodeId);
        }

        public String getIndex() {
            return index;
        }

        public Integer getShardId() {
            return shardId;
        }

        public Throwable getReason() {
            return reason;
        }

        @Nullable
        public String getNodeId() {
            return nodeId;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            if (index != null) {
                builder.field("index", index);
            }
            if (shardId != null) {
                builder.field("shard", shardId);
            }
            if (nodeId != null) {
                builder.field("node", nodeId);
            }
            builder.field("reason");
            {
                builder.startObject();
                ElasticsearchException.toXContent(builder, params, reason);
                builder.endObject();
            }
            builder.endObject();
            return builder;
        }

        @Override
        public String toString() {
            return Strings.toString(this);
        }
    }
}
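
The `ScrollableHitSource` contract above is callback-driven: `start` delivers the first batch and the consumer keeps calling `startNextScroll` until a batch comes back empty. A self-contained sketch of that consumption loop (stand-in types, synchronous for brevity; the real flow is asynchronous):

```
import java.util.Iterator;
import java.util.List;
import java.util.function.Consumer;

class ScrollLoopSketch {
    // Stand-in for ScrollableHitSource: each call to nextBatch is one scroll round-trip.
    interface Source {
        List<String> nextBatch();
    }

    // Keep asking for batches until the source runs dry, handing each hit to the consumer.
    static void drain(Source source, Consumer<String> onHit) {
        List<String> batch = source.nextBatch();
        while (false == batch.isEmpty()) {
            batch.forEach(onHit);
            batch = source.nextBatch();
        }
    }

    public static void main(String[] args) {
        Iterator<List<String>> batches =
                List.of(List.of("doc1", "doc2"), List.of("doc3"), List.<String>of()).iterator();
        drain(batches::next, hit -> System.out.println("processing " + hit));
    }
}
```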

@ -31,10 +31,7 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.internal.ParentFieldMapper;
import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@ -74,35 +71,35 @@ public class TransportDeleteByQueryAction extends HandledTransportAction<DeleteB
        public AsyncDeleteBySearchAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client, ThreadPool threadPool,
                DeleteByQueryRequest request, ActionListener<BulkIndexByScrollResponse> listener,
                ScriptService scriptService, ClusterState clusterState) {
            super(task, logger, client, threadPool, request, request.getSearchRequest(), listener, scriptService, clusterState);
            super(task, logger, client, threadPool, request, listener, scriptService, clusterState);
        }

        @Override
        protected boolean accept(SearchHit doc) {
        protected boolean accept(ScrollableHitSource.Hit doc) {
            // Delete-by-query does not require the source to delete a document
            // and the default implementation checks for it
            return true;
        }

        @Override
        protected RequestWrapper<DeleteRequest> buildRequest(SearchHit doc) {
        protected RequestWrapper<DeleteRequest> buildRequest(ScrollableHitSource.Hit doc) {
            DeleteRequest delete = new DeleteRequest();
            delete.index(doc.index());
            delete.type(doc.type());
            delete.id(doc.id());
            delete.version(doc.version());
            delete.index(doc.getIndex());
            delete.type(doc.getType());
            delete.id(doc.getId());
            delete.version(doc.getVersion());
            return wrap(delete);
        }

        /**
         * Overrides the parent {@link AbstractAsyncBulkIndexByScrollAction#copyMetadata(RequestWrapper, SearchHit)}
         * Overrides the parent {@link AbstractAsyncBulkIndexByScrollAction#copyMetadata(RequestWrapper, ScrollableHitSource.Hit)}
         * method that is much more Update/Reindex oriented and so also copies things like timestamp/ttl which we
         * don't care about for a deletion.
         */
        @Override
        protected RequestWrapper<?> copyMetadata(RequestWrapper<?> request, SearchHit doc) {
            copyParent(request, fieldValue(doc, ParentFieldMapper.NAME));
            copyRouting(request, fieldValue(doc, RoutingFieldMapper.NAME));
        protected RequestWrapper<?> copyMetadata(RequestWrapper<?> request, ScrollableHitSource.Hit doc) {
            request.setParent(doc.getParent());
            request.setRouting(doc.getRouting());
            return request;
        }
    }
@ -19,8 +19,10 @@
|
|||
|
||||
package org.elasticsearch.index.reindex;
|
||||
|
||||
import org.apache.http.HttpHost;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionRequestValidationException;
|
||||
import org.elasticsearch.action.bulk.BackoffPolicy;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.support.ActionFilters;
|
||||
|
@ -28,51 +30,72 @@ import org.elasticsearch.action.support.AutoCreateIndex;
|
|||
import org.elasticsearch.action.support.HandledTransportAction;
|
||||
import org.elasticsearch.client.Client;
|
||||
import org.elasticsearch.client.ParentTaskAssigningClient;
|
||||
import org.elasticsearch.client.RestClient;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.lucene.uid.Versions;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.http.HttpInfo;
|
||||
import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
|
||||
import org.elasticsearch.index.mapper.internal.VersionFieldMapper;
|
||||
import org.elasticsearch.index.reindex.remote.RemoteInfo;
|
||||
import org.elasticsearch.index.reindex.remote.RemoteScrollableHitSource;
|
||||
import org.elasticsearch.node.service.NodeService;
|
||||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
import org.elasticsearch.search.SearchHit;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.function.Function;
|
||||
|
||||
import static java.util.Collections.emptyList;
|
||||
import static java.util.Objects.requireNonNull;
|
||||
import static org.elasticsearch.index.VersionType.INTERNAL;
|
||||
|
||||
public class TransportReindexAction extends HandledTransportAction<ReindexRequest, BulkIndexByScrollResponse> {
|
||||
public static final Setting<List<String>> REMOTE_CLUSTER_WHITELIST =
|
||||
Setting.listSetting("reindex.remote.whitelist", emptyList(), Function.identity(), Property.NodeScope);
|
||||
|
||||
private final ClusterService clusterService;
|
||||
private final ScriptService scriptService;
|
||||
private final AutoCreateIndex autoCreateIndex;
|
||||
private final Client client;
|
||||
private final Set<String> remoteWhitelist;
|
||||
private final NodeService nodeService;
|
||||
|
||||
@Inject
|
||||
public TransportReindexAction(Settings settings, ThreadPool threadPool, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver, ClusterService clusterService, ScriptService scriptService,
|
||||
AutoCreateIndex autoCreateIndex, Client client, TransportService transportService) {
|
||||
AutoCreateIndex autoCreateIndex, Client client, TransportService transportService, NodeService nodeService) {
|
||||
super(settings, ReindexAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
|
||||
ReindexRequest::new);
|
||||
this.clusterService = clusterService;
|
||||
this.scriptService = scriptService;
|
||||
this.autoCreateIndex = autoCreateIndex;
|
||||
this.client = client;
|
||||
remoteWhitelist = new HashSet<>(REMOTE_CLUSTER_WHITELIST.get(settings));
|
||||
this.nodeService = nodeService;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doExecute(Task task, ReindexRequest request, ActionListener<BulkIndexByScrollResponse> listener) {
|
||||
checkRemoteWhitelist(request.getRemoteInfo());
|
||||
ClusterState state = clusterService.state();
|
||||
validateAgainstAliases(request.getSearchRequest(), request.getDestination(), indexNameExpressionResolver, autoCreateIndex, state);
|
||||
validateAgainstAliases(request.getSearchRequest(), request.getDestination(), request.getRemoteInfo(), indexNameExpressionResolver,
|
||||
autoCreateIndex, state);
|
||||
ParentTaskAssigningClient client = new ParentTaskAssigningClient(this.client, clusterService.localNode(), task);
|
||||
new AsyncIndexBySearchAction((BulkByScrollTask) task, logger, client, threadPool, request, listener, scriptService, state).start();
|
||||
}
|
||||
|
@ -82,15 +105,43 @@ public class TransportReindexAction extends HandledTransportAction<ReindexReques
|
|||
throw new UnsupportedOperationException("task required");
|
||||
}
|
||||
|
||||
private void checkRemoteWhitelist(RemoteInfo remoteInfo) {
|
||||
TransportAddress publishAddress = null;
|
||||
HttpInfo httpInfo = nodeService.info().getHttp();
|
||||
if (httpInfo != null && httpInfo.getAddress() != null) {
|
||||
publishAddress = httpInfo.getAddress().publishAddress();
|
||||
}
|
||||
checkRemoteWhitelist(remoteWhitelist, remoteInfo, publishAddress);
|
||||
}
|
||||
|
||||
static void checkRemoteWhitelist(Set<String> whitelist, RemoteInfo remoteInfo, TransportAddress publishAddress) {
|
||||
if (remoteInfo == null) return;
|
||||
String check = remoteInfo.getHost() + ':' + remoteInfo.getPort();
|
||||
if (whitelist.contains(check)) return;
|
||||
/*
|
||||
* For testing we support the key "myself" to allow connecting to the local node. We can't just change the setting to include the
|
||||
* local node because it is intentionally not a dynamic setting for security purposes. We can't use something like "localhost:9200"
|
||||
* because we don't know up front which port we'll get because the tests bind to port 0. Instead we try to resolve it here, taking
|
||||
* "myself" to mean "my published http address".
|
||||
*/
|
||||
if (whitelist.contains("myself") && publishAddress != null && publishAddress.toString().equals(check)) {
|
||||
return;
|
||||
}
|
||||
throw new IllegalArgumentException('[' + check + "] not whitelisted in " + REMOTE_CLUSTER_WHITELIST.getKey());
|
||||
}
|
||||
|
||||
    /**
     * Throws an ActionRequestValidationException if the request tries to index
     * back into the same index or into an index that points to two indexes.
     * This cannot be done during request validation because the cluster state
     * isn't available then. Package private for testing.
     */
    static String validateAgainstAliases(SearchRequest source, IndexRequest destination,
    static void validateAgainstAliases(SearchRequest source, IndexRequest destination, RemoteInfo remoteInfo,
            IndexNameExpressionResolver indexNameExpressionResolver, AutoCreateIndex autoCreateIndex,
            ClusterState clusterState) {
        if (remoteInfo != null) {
            return;
        }
        String target = destination.index();
        if (false == autoCreateIndex.shouldAutoCreate(target, clusterState)) {
            /*

@ -107,7 +158,6 @@ public class TransportReindexAction extends HandledTransportAction<ReindexReques
                throw e;
            }
        }
        return target;
    }

    /**

@ -121,11 +171,30 @@ public class TransportReindexAction extends HandledTransportAction<ReindexReques
        public AsyncIndexBySearchAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client, ThreadPool threadPool,
                ReindexRequest request, ActionListener<BulkIndexByScrollResponse> listener,
                ScriptService scriptService, ClusterState clusterState) {
            super(task, logger, client, threadPool, request, request.getSearchRequest(), listener, scriptService, clusterState);
            super(task, logger, client, threadPool, request, listener, scriptService, clusterState);
        }

        @Override
        protected BiFunction<RequestWrapper<?>, SearchHit, RequestWrapper<?>> buildScriptApplier() {
        protected ScrollableHitSource buildScrollableResultSource(BackoffPolicy backoffPolicy) {
            if (mainRequest.getRemoteInfo() != null) {
                // NORELEASE track 500-level retries that are builtin to the client
                RemoteInfo remoteInfo = mainRequest.getRemoteInfo();
                if (remoteInfo.getUsername() != null) {
                    // NORELEASE support auth
                    throw new UnsupportedOperationException("Auth is unsupported");
                }
                RestClient restClient = RestClient.builder(new HttpHost(remoteInfo.getHost(), remoteInfo.getPort(), remoteInfo.getScheme()))
                        .build();
                RemoteScrollableHitSource.AsyncClient client = new RemoteScrollableHitSource.AsynchronizingRestClient(threadPool,
                        restClient);
                return new RemoteScrollableHitSource(logger, backoffPolicy, threadPool, task::countSearchRetry, this::finishHim, client,
                        remoteInfo.getQuery(), mainRequest.getSearchRequest());
            }
            return super.buildScrollableResultSource(backoffPolicy);
        }

        @Override
        protected BiFunction<RequestWrapper<?>, ScrollableHitSource.Hit, RequestWrapper<?>> buildScriptApplier() {
            Script script = mainRequest.getScript();
            if (script != null) {
                return new ReindexScriptApplier(task, scriptService, script, script.getParams());

@ -134,7 +203,7 @@ public class TransportReindexAction extends HandledTransportAction<ReindexReques
        }

        @Override
        protected RequestWrapper<IndexRequest> buildRequest(SearchHit doc) {
        protected RequestWrapper<IndexRequest> buildRequest(ScrollableHitSource.Hit doc) {
            IndexRequest index = new IndexRequest();

            // Copy the index from the request so we always write where it asked to write

@ -142,7 +211,7 @@ public class TransportReindexAction extends HandledTransportAction<ReindexReques

            // If the request overrides the type then the user wants all documents in that type. Otherwise keep the doc's type.
            if (mainRequest.getDestination().type() == null) {
                index.type(doc.type());
                index.type(doc.getType());
            } else {
                index.type(mainRequest.getDestination().type());
            }

@ -155,12 +224,12 @@ public class TransportReindexAction extends HandledTransportAction<ReindexReques
            if (index.versionType() == INTERNAL) {
                index.version(mainRequest.getDestination().version());
            } else {
                index.version(doc.version());
                index.version(doc.getVersion());
            }

            // id and source always come from the found doc. Scripts can change them but they operate on the index request.
            index.id(doc.id());
            index.source(doc.sourceRef());
            index.id(doc.getId());
            index.source(doc.getSource());

            /*
             * The rest of the index request just has to be copied from the template. It may be changed later from scripts or the superclass

@ -24,7 +24,6 @@ import org.elasticsearch.action.TaskOperationFailure;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.tasks.TransportTasksAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;

@ -41,7 +41,6 @@ import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.index.mapper.internal.TypeFieldMapper;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

@ -85,11 +84,11 @@ public class TransportUpdateByQueryAction extends HandledTransportAction<UpdateB
        public AsyncIndexBySearchAction(BulkByScrollTask task, ESLogger logger, ParentTaskAssigningClient client, ThreadPool threadPool,
                UpdateByQueryRequest request, ActionListener<BulkIndexByScrollResponse> listener,
                ScriptService scriptService, ClusterState clusterState) {
            super(task, logger, client, threadPool, request, request.getSearchRequest(), listener, scriptService, clusterState);
            super(task, logger, client, threadPool, request, listener, scriptService, clusterState);
        }

        @Override
        protected BiFunction<RequestWrapper<?>, SearchHit, RequestWrapper<?>> buildScriptApplier() {
        protected BiFunction<RequestWrapper<?>, ScrollableHitSource.Hit, RequestWrapper<?>> buildScriptApplier() {
            Script script = mainRequest.getScript();
            if (script != null) {
                return new UpdateByQueryScriptApplier(task, scriptService, script, script.getParams());

@ -98,14 +97,14 @@ public class TransportUpdateByQueryAction extends HandledTransportAction<UpdateB
        }

        @Override
        protected RequestWrapper<IndexRequest> buildRequest(SearchHit doc) {
        protected RequestWrapper<IndexRequest> buildRequest(ScrollableHitSource.Hit doc) {
            IndexRequest index = new IndexRequest();
            index.index(doc.index());
            index.type(doc.type());
            index.id(doc.id());
            index.source(doc.sourceRef());
            index.index(doc.getIndex());
            index.type(doc.getType());
            index.id(doc.getId());
            index.source(doc.getSource());
            index.versionType(VersionType.INTERNAL);
            index.version(doc.version());
            index.version(doc.getVersion());
            index.setPipeline(mainRequest.getPipeline());
            return wrap(index);
        }

@ -0,0 +1,113 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex.remote;

import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

import static java.util.Objects.requireNonNull;

public class RemoteInfo implements Writeable {
    private final String scheme;
    private final String host;
    private final int port;
    private final BytesReference query;
    private final String username;
    private final String password;

    public RemoteInfo(String scheme, String host, int port, BytesReference query, String username, String password) {
        this.scheme = requireNonNull(scheme, "[scheme] must be specified to reindex from a remote cluster");
        this.host = requireNonNull(host, "[host] must be specified to reindex from a remote cluster");
        this.port = port;
        this.query = requireNonNull(query, "[query] must be specified to reindex from a remote cluster");
        this.username = username;
        this.password = password;
    }

    /**
     * Read from a stream.
     */
    public RemoteInfo(StreamInput in) throws IOException {
        scheme = in.readString();
        host = in.readString();
        port = in.readVInt();
        query = in.readBytesReference();
        username = in.readOptionalString();
        password = in.readOptionalString();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(scheme);
        out.writeString(host);
        out.writeVInt(port);
        out.writeBytesReference(query);
        out.writeOptionalString(username);
        out.writeOptionalString(password);
    }

    public String getScheme() {
        return scheme;
    }

    public String getHost() {
        return host;
    }

    public int getPort() {
        return port;
    }

    public BytesReference getQuery() {
        return query;
    }

    @Nullable
    public String getUsername() {
        return username;
    }

    @Nullable
    public String getPassword() {
        return password;
    }

    @Override
    public String toString() {
        StringBuilder b = new StringBuilder();
        if (false == "http".equals(scheme)) {
            // http is the default so it isn't worth taking up space if it is the scheme
            b.append("scheme=").append(scheme).append(' ');
        }
        b.append("host=").append(host).append(" port=").append(port).append(" query=").append(query.utf8ToString());
        if (username != null) {
            b.append(" username=").append(username);
        }
        if (password != null) {
            b.append(" password=<<>>");
        }
        return b.toString();
    }
}
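A minimal usage sketch of RemoteInfo, not part of the diff; the host, port, and query here are hypothetical stand-ins:
```
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.index.reindex.remote.RemoteInfo;

public class RemoteInfoSketch {
    public static void main(String[] args) {
        // Hypothetical remote cluster; username and password stay null because
        // auth is still unsupported at this point in the change.
        RemoteInfo remote = new RemoteInfo("http", "remote.example.com", 9200,
                new BytesArray("{\"match_all\":{}}"), null, null);
        // toString() omits the default "http" scheme and would mask a password.
        System.out.println(remote); // host=remote.example.com port=9200 query={"match_all":{}}
    }
}
```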
@ -0,0 +1,163 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex.remote;

import org.apache.http.HttpEntity;
import org.apache.http.entity.ByteArrayEntity;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.search.sort.FieldSortBuilder;
import org.elasticsearch.search.sort.SortBuilder;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import static java.util.Collections.singletonMap;

final class RemoteRequestBuilders {
    private RemoteRequestBuilders() {}

    static String initialSearchPath(SearchRequest searchRequest) {
        // It is nasty to build paths with StringBuilder but we'll be careful....
        StringBuilder path = new StringBuilder("/");
        addIndexesOrTypes(path, "Index", searchRequest.indices());
        addIndexesOrTypes(path, "Type", searchRequest.types());
        path.append("_search");
        return path.toString();
    }

    static Map<String, String> initialSearchParams(SearchRequest searchRequest, Version remoteVersion) {
        Map<String, String> params = new HashMap<>();
        if (searchRequest.scroll() != null) {
            params.put("scroll", searchRequest.scroll().keepAlive().toString());
        }
        params.put("size", Integer.toString(searchRequest.source().size()));
        if (searchRequest.source().version() == null || searchRequest.source().version() == true) {
            // false is the only value that makes it false. Null defaults to true....
            params.put("version", null);
        }
        if (searchRequest.source().sorts() != null) {
            boolean useScan = false;
            // Detect if we should use search_type=scan rather than a sort
            if (remoteVersion.before(Version.V_2_1_0)) {
                for (SortBuilder<?> sort : searchRequest.source().sorts()) {
                    if (sort instanceof FieldSortBuilder) {
                        FieldSortBuilder f = (FieldSortBuilder) sort;
                        if (f.getFieldName().equals(FieldSortBuilder.DOC_FIELD_NAME)) {
                            useScan = true;
                            break;
                        }
                    }
                }
            }
            if (useScan) {
                params.put("search_type", "scan");
            } else {
                StringBuilder sorts = new StringBuilder(sortToUri(searchRequest.source().sorts().get(0)));
                for (int i = 1; i < searchRequest.source().sorts().size(); i++) {
                    sorts.append(',').append(sortToUri(searchRequest.source().sorts().get(i)));
                }
                params.put("sorts", sorts.toString());
            }
        }
        if (searchRequest.source().storedFields() != null && false == searchRequest.source().storedFields().isEmpty()) {
            StringBuilder fields = new StringBuilder(searchRequest.source().storedFields().get(0));
            for (int i = 1; i < searchRequest.source().storedFields().size(); i++) {
                fields.append(',').append(searchRequest.source().storedFields().get(i));
            }
            String storedFieldsParamName = remoteVersion.before(Version.V_5_0_0_alpha4) ? "fields" : "stored_fields";
            params.put(storedFieldsParamName, fields.toString());
        }
        return params;
    }

    static HttpEntity initialSearchEntity(BytesReference query) {
        try (XContentBuilder entity = JsonXContent.contentBuilder(); XContentParser queryParser = XContentHelper.createParser(query)) {
            entity.startObject();
            entity.field("query");
            /*
             * We're intentionally a bit paranoid here - copying the query as xcontent rather than writing a raw field. We don't want poorly
             * written queries to escape. Ever.
             */
            entity.copyCurrentStructure(queryParser);
            XContentParser.Token shouldBeEof = queryParser.nextToken();
            if (shouldBeEof != null) {
                throw new ElasticsearchException(
                        "query was more than a single object. This first token after the object is [" + shouldBeEof + "]");
            }
            entity.endObject();
            BytesRef bytes = entity.bytes().toBytesRef();
            return new ByteArrayEntity(bytes.bytes, bytes.offset, bytes.length, ContentType.APPLICATION_JSON);
        } catch (IOException e) {
            throw new ElasticsearchException("unexpected error building entity", e);
        }
    }

    private static void addIndexesOrTypes(StringBuilder path, String name, String[] indicesOrTypes) {
        if (indicesOrTypes == null || indicesOrTypes.length == 0) {
            return;
        }
        for (String indexOrType : indicesOrTypes) {
            checkIndexOrType(name, indexOrType);
        }
        path.append(Strings.arrayToCommaDelimitedString(indicesOrTypes)).append('/');
    }

    private static void checkIndexOrType(String name, String indexOrType) {
        if (indexOrType.indexOf(',') >= 0) {
            throw new IllegalArgumentException(name + " containing [,] not supported but got [" + indexOrType + "]");
        }
        if (indexOrType.indexOf('/') >= 0) {
            throw new IllegalArgumentException(name + " containing [/] not supported but got [" + indexOrType + "]");
        }
    }

    private static String sortToUri(SortBuilder<?> sort) {
        if (sort instanceof FieldSortBuilder) {
            FieldSortBuilder f = (FieldSortBuilder) sort;
            return f.getFieldName() + ":" + f.order();
        }
        throw new IllegalArgumentException("Unsupported sort [" + sort + "]");
    }

    static String scrollPath() {
        return "/_search/scroll";
    }

    static Map<String, String> scrollParams(TimeValue keepAlive) {
        return singletonMap("scroll", keepAlive.toString());
    }

    static HttpEntity scrollEntity(String scroll) {
        return new StringEntity(scroll, ContentType.TEXT_PLAIN);
    }
}
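A hedged sketch of the path and parameters these builders produce for a hypothetical request. It assumes it lives in the same package, since the helpers are package-private:
```
package org.elasticsearch.index.reindex.remote;

import org.elasticsearch.Version;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.builder.SearchSourceBuilder;

public class RemoteRequestBuildersSketch {
    public static void main(String[] args) {
        // Hypothetical request: one index, one type, 100 hits per scroll batch.
        SearchRequest search = new SearchRequest("logs").types("event");
        search.source(new SearchSourceBuilder().size(100));
        search.scroll(TimeValue.timeValueMinutes(5));

        // Indices and types become path segments: /logs/event/_search
        System.out.println(RemoteRequestBuilders.initialSearchPath(search));
        // Roughly {scroll=5m, size=100, version=null}; "version" is sent as a valueless flag.
        System.out.println(RemoteRequestBuilders.initialSearchParams(search, Version.CURRENT));
    }
}
```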
@ -0,0 +1,301 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex.remote;

import org.elasticsearch.Version;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.reindex.ScrollableHitSource.BasicHit;
import org.elasticsearch.index.reindex.ScrollableHitSource.Hit;
import org.elasticsearch.index.reindex.ScrollableHitSource.Response;
import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure;

import java.io.IOException;
import java.util.List;
import java.util.function.BiFunction;

import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static java.util.Objects.requireNonNull;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;

/**
 * Parsers to convert the response from the remote host into objects useful for {@link RemoteScrollableHitSource}. Lots of data is
 * intentionally thrown on the floor because we don't need it but ObjectParser and friends are strict about blowing up when they see
 * elements they don't understand. So you'll see a lot of BiConsumers that look like "(b, v) -> {}". That means "I don't care about the
 * value here, just throw it away and don't blow up."
 */
final class RemoteResponseParsers {
    private RemoteResponseParsers() {}

    /**
     * Parser for an individual {@code hit} element.
     */
    public static final ConstructingObjectParser<BasicHit, ParseFieldMatcherSupplier> HIT_PARSER = new ConstructingObjectParser<>("hit",
            a -> {
                int i = 0;
                String index = (String) a[i++];
                String type = (String) a[i++];
                String id = (String) a[i++];
                long version = (long) a[i++];
                return new BasicHit(index, type, id, version);
            });
    static {
        HIT_PARSER.declareString(constructorArg(), new ParseField("_index"));
        HIT_PARSER.declareString(constructorArg(), new ParseField("_type"));
        HIT_PARSER.declareString(constructorArg(), new ParseField("_id"));
        HIT_PARSER.declareLong(constructorArg(), new ParseField("_version"));
        HIT_PARSER.declareObject(BasicHit::setSource, (p, s) -> {
            try {
                /*
                 * We spool the data from the remote back into xcontent so we can get bytes to send. There ought to be a better way but for
                 * now this should do.
                 */
                try (XContentBuilder b = JsonXContent.contentBuilder()) {
                    b.copyCurrentStructure(p);
                    return b.bytes();
                }
            } catch (IOException e) {
                throw new ParsingException(p.getTokenLocation(), "[hit] failed to parse [_source]", e);
            }
        }, new ParseField("_source"));
        HIT_PARSER.declareString(BasicHit::setRouting, new ParseField("_routing"));
        HIT_PARSER.declareString(BasicHit::setParent, new ParseField("_parent"));
        HIT_PARSER.declareLong(BasicHit::setTTL, new ParseField("_ttl"));
        HIT_PARSER.declareLong(BasicHit::setTimestamp, new ParseField("_timestamp"));
        HIT_PARSER.declareField((b, v) -> {}, p -> null, new ParseField("_score"), ValueType.FLOAT_OR_NULL);
        HIT_PARSER.declareStringArray((b, v) -> {}, new ParseField("sort"));
    }

    /**
     * Parser for the {@code hits} element. Parsed to an array of {@code [total (Long), hits (List<Hit>)]}.
     */
    public static final ConstructingObjectParser<Object[], ParseFieldMatcherSupplier> HITS_PARSER = new ConstructingObjectParser<>("hits",
            a -> a);
    static {
        HITS_PARSER.declareLong(constructorArg(), new ParseField("total"));
        HITS_PARSER.declareObjectArray(constructorArg(), HIT_PARSER, new ParseField("hits"));
        HITS_PARSER.declareField((b, v) -> {}, p -> null, new ParseField("max_score"), ValueType.FLOAT_OR_NULL);
    }

    /**
     * Parser for {@code failed} shards in the {@code _shards} elements.
     */
    public static final ConstructingObjectParser<SearchFailure, ParseFieldMatcherSupplier> SEARCH_FAILURE_PARSER =
            new ConstructingObjectParser<>("failure", a -> {
                int i = 0;
                String index = (String) a[i++];
                Integer shardId = (Integer) a[i++];
                String nodeId = (String) a[i++];
                Object reason = a[i++];

                Throwable reasonThrowable;
                if (reason instanceof String) {
                    reasonThrowable = new RuntimeException("Unknown remote exception with reason=[" + (String) reason + "]");
                } else {
                    reasonThrowable = (Throwable) reason;
                }
                return new SearchFailure(reasonThrowable, index, shardId, nodeId);
            });
    static {
        SEARCH_FAILURE_PARSER.declareString(optionalConstructorArg(), new ParseField("index"));
        SEARCH_FAILURE_PARSER.declareInt(optionalConstructorArg(), new ParseField("shard"));
        SEARCH_FAILURE_PARSER.declareString(optionalConstructorArg(), new ParseField("node"));
        SEARCH_FAILURE_PARSER.declareField(constructorArg(), (p, c) -> {
            if (p.currentToken() == XContentParser.Token.START_OBJECT) {
                return ThrowableBuilder.PARSER.apply(p, c);
            } else {
                return p.text();
            }
        }, new ParseField("reason"), ValueType.OBJECT_OR_STRING);
        SEARCH_FAILURE_PARSER.declareInt((b, v) -> {}, new ParseField("status"));
    }

    /**
     * Parser for the {@code _shards} element. Throws everything out except the errors array if there is one. If there isn't one then it
     * parses to an empty list.
     */
    public static final ConstructingObjectParser<List<Throwable>, ParseFieldMatcherSupplier> SHARDS_PARSER =
            new ConstructingObjectParser<>("_shards", a -> {
                @SuppressWarnings("unchecked")
                List<Throwable> failures = (List<Throwable>) a[0];
                failures = failures == null ? emptyList() : failures;
                return failures;
            });
    static {
        SHARDS_PARSER.declareObjectArray(optionalConstructorArg(), SEARCH_FAILURE_PARSER, new ParseField("failures"));
        SHARDS_PARSER.declareInt((b, v) -> {}, new ParseField("total"));
        SHARDS_PARSER.declareInt((b, v) -> {}, new ParseField("successful"));
        SHARDS_PARSER.declareInt((b, v) -> {}, new ParseField("failed"));
    }

    public static final ConstructingObjectParser<Response, ParseFieldMatcherSupplier> RESPONSE_PARSER =
            new ConstructingObjectParser<>("search_response", a -> {
                int i = 0;
                Throwable catastrophicFailure = (Throwable) a[i++];
                if (catastrophicFailure != null) {
                    return new Response(false, singletonList(new SearchFailure(catastrophicFailure)), 0, emptyList(), null);
                }
                boolean timedOut = (boolean) a[i++];
                String scroll = (String) a[i++];
                Object[] hitsElement = (Object[]) a[i++];
                @SuppressWarnings("unchecked")
                List<SearchFailure> failures = (List<SearchFailure>) a[i++];

                long totalHits = 0;
                List<Hit> hits = emptyList();

                // Pull apart the hits element if we got it
                if (hitsElement != null) {
                    i = 0;
                    totalHits = (long) hitsElement[i++];
                    @SuppressWarnings("unchecked")
                    List<Hit> h = (List<Hit>) hitsElement[i++];
                    hits = h;
                }

                return new Response(timedOut, failures, totalHits, hits, scroll);
            });
    static {
        RESPONSE_PARSER.declareObject(optionalConstructorArg(), ThrowableBuilder.PARSER, new ParseField("error"));
        RESPONSE_PARSER.declareBoolean(optionalConstructorArg(), new ParseField("timed_out"));
        RESPONSE_PARSER.declareString(optionalConstructorArg(), new ParseField("_scroll_id"));
        RESPONSE_PARSER.declareObject(optionalConstructorArg(), HITS_PARSER, new ParseField("hits"));
        RESPONSE_PARSER.declareObject(optionalConstructorArg(), SHARDS_PARSER, new ParseField("_shards"));
        RESPONSE_PARSER.declareInt((b, v) -> {}, new ParseField("took"));
        RESPONSE_PARSER.declareBoolean((b, v) -> {}, new ParseField("terminated_early"));
        RESPONSE_PARSER.declareInt((b, v) -> {}, new ParseField("status"));
    }

    /**
     * Collects stuff about Throwables and attempts to rebuild them.
     */
    public static class ThrowableBuilder {
        public static final BiFunction<XContentParser, ParseFieldMatcherSupplier, Throwable> PARSER;
        static {
            ObjectParser<ThrowableBuilder, ParseFieldMatcherSupplier> parser = new ObjectParser<>("reason", ThrowableBuilder::new);
            PARSER = parser.andThen(ThrowableBuilder::build);
            parser.declareString(ThrowableBuilder::setType, new ParseField("type"));
            parser.declareString(ThrowableBuilder::setReason, new ParseField("reason"));
            parser.declareObject(ThrowableBuilder::setCausedBy, PARSER, new ParseField("caused_by"));

            // So we can give a nice error for parsing exceptions
            parser.declareInt(ThrowableBuilder::setLine, new ParseField("line"));
            parser.declareInt(ThrowableBuilder::setColumn, new ParseField("col"));

            // So we don't blow up on search exceptions
            parser.declareString((b, v) -> {}, new ParseField("phase"));
            parser.declareBoolean((b, v) -> {}, new ParseField("grouped"));
            parser.declareField((p, v, c) -> p.skipChildren(), new ParseField("failed_shards"), ValueType.OBJECT_ARRAY);

            // Just throw away the root_cause
            parser.declareField((p, v, c) -> p.skipChildren(), new ParseField("root_cause"), ValueType.OBJECT_ARRAY);
        }

        private String type;
        private String reason;
        private Integer line;
        private Integer column;
        private Throwable causedBy;

        public Throwable build() {
            Throwable t = buildWithoutCause();
            if (causedBy != null) {
                t.initCause(causedBy);
            }
            return t;
        }

        private Throwable buildWithoutCause() {
            requireNonNull(type, "[type] is required");
            requireNonNull(reason, "[reason] is required");
            switch (type) {
            // Make some effort to use the right exceptions
            case "es_rejected_execution_exception":
                return new EsRejectedExecutionException(reason);
            case "parsing_exception":
                XContentLocation location = null;
                if (line != null && column != null) {
                    location = new XContentLocation(line, column);
                }
                return new ParsingException(location, reason);
            // But it isn't worth trying to get it perfect....
            default:
                return new RuntimeException(type + ": " + reason);
            }
        }

        public void setType(String type) {
            this.type = type;
        }
        public void setReason(String reason) {
            this.reason = reason;
        }
        public void setLine(Integer line) {
            this.line = line;
        }
        public void setColumn(Integer column) {
            this.column = column;
        }
        public void setCausedBy(Throwable causedBy) {
            this.causedBy = causedBy;
        }
    }

    /**
     * Parses the {@code version} field of the main action. There are a surprising number of fields in this that we don't need!
     */
    public static final ConstructingObjectParser<Version, ParseFieldMatcherSupplier> VERSION_PARSER = new ConstructingObjectParser<>(
            "version", a -> Version.fromString((String) a[0]));
    static {
        VERSION_PARSER.declareString(constructorArg(), new ParseField("number"));
        VERSION_PARSER.declareBoolean((p, v) -> {}, new ParseField("snapshot_build"));
        VERSION_PARSER.declareBoolean((p, v) -> {}, new ParseField("build_snapshot"));
        VERSION_PARSER.declareString((p, v) -> {}, new ParseField("build_hash"));
        VERSION_PARSER.declareString((p, v) -> {}, new ParseField("build_date"));
        VERSION_PARSER.declareString((p, v) -> {}, new ParseField("build_timestamp"));
        VERSION_PARSER.declareString((p, v) -> {}, new ParseField("lucene_version"));
    }

    /**
     * Parses the main action to return just the {@linkplain Version} that it returns. We throw everything else out.
     */
    public static final ConstructingObjectParser<Version, ParseFieldMatcherSupplier> MAIN_ACTION_PARSER = new ConstructingObjectParser<>(
            "/", a -> (Version) a[0]);
    static {
        MAIN_ACTION_PARSER.declareBoolean((p, v) -> {}, new ParseField("ok"));
        MAIN_ACTION_PARSER.declareInt((p, v) -> {}, new ParseField("status"));
        MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("name"));
        MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("cluster_name"));
        MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("name"));
        MAIN_ACTION_PARSER.declareString((p, v) -> {}, new ParseField("tagline"));
        MAIN_ACTION_PARSER.declareObject(constructorArg(), VERSION_PARSER, new ParseField("version"));
    }
}
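To make the shape of the input concrete, here is a hedged sketch that feeds RESPONSE_PARSER a trimmed, hypothetical scroll response by hand (same package assumed, since the class is package-private):
```
package org.elasticsearch.index.reindex.remote;

import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.reindex.ScrollableHitSource.Response;

public class RemoteResponseParsersSketch {
    public static void main(String[] args) throws Exception {
        // A cut-down scroll response carrying only the fields the parsers need.
        String json = "{\"_scroll_id\":\"c2Nyb2xs\",\"timed_out\":false,"
                + "\"_shards\":{\"total\":5,\"successful\":5,\"failed\":0},"
                + "\"hits\":{\"total\":1,\"hits\":[{\"_index\":\"test\",\"_type\":\"doc\","
                + "\"_id\":\"1\",\"_version\":2,\"_source\":{\"foo\":\"bar\"}}]}}";
        try (XContentParser parser = JsonXContent.jsonXContent.createParser(json)) {
            // STRICT matching mirrors what execute() below does with the parsers.
            Response response = RemoteResponseParsers.RESPONSE_PARSER.apply(parser, () -> ParseFieldMatcher.STRICT);
            System.out.println(response.getHits().get(0).getId()); // 1
        }
    }
}
```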
@ -0,0 +1,242 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex.remote;

import org.apache.http.HttpEntity;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.ParseFieldMatcherSupplier;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.reindex.ScrollableHitSource;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.BufferedInputStream;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.util.Iterator;
import java.util.Map;
import java.util.function.BiFunction;
import java.util.function.Consumer;

import static java.util.Collections.emptyMap;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.elasticsearch.common.unit.TimeValue.timeValueNanos;
import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearchEntity;
import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearchParams;
import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearchPath;
import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.scrollEntity;
import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.scrollParams;
import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.scrollPath;
import static org.elasticsearch.index.reindex.remote.RemoteResponseParsers.MAIN_ACTION_PARSER;
import static org.elasticsearch.index.reindex.remote.RemoteResponseParsers.RESPONSE_PARSER;

public class RemoteScrollableHitSource extends ScrollableHitSource {
    private final AsyncClient client;
    private final BytesReference query;
    private final SearchRequest searchRequest;
    Version remoteVersion;

    public RemoteScrollableHitSource(ESLogger logger, BackoffPolicy backoffPolicy, ThreadPool threadPool, Runnable countSearchRetry,
            Consumer<Exception> fail, AsyncClient client, BytesReference query, SearchRequest searchRequest) {
        super(logger, backoffPolicy, threadPool, countSearchRetry, fail);
        this.query = query;
        this.searchRequest = searchRequest;
        this.client = client;
    }

    @Override
    public void close() {
        try {
            client.close();
        } catch (IOException e) {
            fail.accept(new IOException("couldn't close the remote connection", e));
        }
    }

    @Override
    protected void doStart(Consumer<? super Response> onResponse) {
        lookupRemoteVersion(version -> {
            remoteVersion = version;
            execute("POST", initialSearchPath(searchRequest), initialSearchParams(searchRequest, version),
                    initialSearchEntity(query), RESPONSE_PARSER, r -> onStartResponse(onResponse, r));
        });
    }

    void lookupRemoteVersion(Consumer<Version> onVersion) {
        execute("GET", "", emptyMap(), null, MAIN_ACTION_PARSER, onVersion);
    }

    void onStartResponse(Consumer<? super Response> onResponse, Response response) {
        if (Strings.hasLength(response.getScrollId()) && response.getHits().isEmpty()) {
            logger.debug("First response looks like a scan response. Jumping right to the second. scroll=[{}]", response.getScrollId());
            doStartNextScroll(response.getScrollId(), timeValueMillis(0), onResponse);
        } else {
            onResponse.accept(response);
        }
    }

    @Override
    protected void doStartNextScroll(String scrollId, TimeValue extraKeepAlive, Consumer<? super Response> onResponse) {
        execute("POST", scrollPath(), scrollParams(timeValueNanos(searchRequest.scroll().keepAlive().nanos() + extraKeepAlive.nanos())),
                scrollEntity(scrollId), RESPONSE_PARSER, onResponse);
    }

    @Override
    protected void clearScroll(String scrollId) {
        // Need to throw out response....
        client.performRequest("DELETE", scrollPath(), emptyMap(), scrollEntity(scrollId), new ResponseListener() {
            @Override
            public void onResponse(InputStream response) {
                logger.debug("Successfully cleared [{}]", scrollId);
            }

            @Override
            public void onRetryableFailure(Exception t) {
                onFailure(t);
            }

            @Override
            public void onFailure(Exception t) {
                logger.warn("Failed to clear scroll [{}]", t, scrollId);
            }
        });
    }

    <T> void execute(String method, String uri, Map<String, String> params, HttpEntity entity,
            BiFunction<XContentParser, ParseFieldMatcherSupplier, T> parser, Consumer<? super T> listener) {
        class RetryHelper extends AbstractRunnable {
            private final Iterator<TimeValue> retries = backoffPolicy.iterator();

            @Override
            protected void doRun() throws Exception {
                client.performRequest(method, uri, params, entity, new ResponseListener() {
                    @Override
                    public void onResponse(InputStream content) {
                        T response;
                        try {
                            XContent xContent = XContentFactory.xContentType(content).xContent();
                            try (XContentParser xContentParser = xContent.createParser(content)) {
                                response = parser.apply(xContentParser, () -> ParseFieldMatcher.STRICT);
                            }
                        } catch (IOException e) {
                            throw new ElasticsearchException("Error deserializing response", e);
                        }
                        listener.accept(response);
                    }

                    @Override
                    public void onFailure(Exception e) {
                        fail.accept(e);
                    }

                    @Override
                    public void onRetryableFailure(Exception t) {
                        if (retries.hasNext()) {
                            TimeValue delay = retries.next();
                            logger.trace("retrying rejected search after [{}]", t, delay);
                            countSearchRetry.run();
                            threadPool.schedule(delay, ThreadPool.Names.SAME, RetryHelper.this);
                        } else {
                            fail.accept(t);
                        }
                    }
                });
            }

            @Override
            public void onFailure(Exception t) {
                fail.accept(t);
            }
        }
        new RetryHelper().run();
    }

    public interface AsyncClient extends Closeable {
        void performRequest(String method, String uri, Map<String, String> params, HttpEntity entity, ResponseListener listener);
    }

    public interface ResponseListener extends ActionListener<InputStream> {
        void onRetryableFailure(Exception t);
    }

    public static class AsynchronizingRestClient implements AsyncClient {
        private final ThreadPool threadPool;
        private final RestClient restClient;

        public AsynchronizingRestClient(ThreadPool threadPool, RestClient restClient) {
            this.threadPool = threadPool;
            this.restClient = restClient;
        }

        @Override
        public void performRequest(String method, String uri, Map<String, String> params, HttpEntity entity,
                ResponseListener listener) {
            /*
             * We use the generic thread pool here because this client is blocking and the generic thread pool is sized appropriately
             * for some of the threads on it to be blocked, waiting on IO. It'd be a disaster if this ran on the listener thread pool,
             * eating valuable threads needed to handle responses. Most other thread pools would probably not mind running this either,
             * but the generic thread pool is the "most right" place for it to run. We could make our own thread pool for this but the
             * generic thread pool already has plenty of capacity.
             */
            threadPool.generic().execute(new AbstractRunnable() {
                @Override
                protected void doRun() throws Exception {
                    try (org.elasticsearch.client.Response response = restClient.performRequest(method, uri, params, entity)) {
                        InputStream markSupportedInputStream = new BufferedInputStream(response.getEntity().getContent());
                        listener.onResponse(markSupportedInputStream);
                    }
                }

                @Override
                public void onFailure(Exception t) {
                    if (t instanceof ResponseException) {
                        ResponseException re = (ResponseException) t;
                        if (RestStatus.TOO_MANY_REQUESTS.getStatus() == re.getResponse().getStatusLine().getStatusCode()) {
                            listener.onRetryableFailure(t);
                            return;
                        }
                    }
                    listener.onFailure(t);
                }
            });
        }

        @Override
        public void close() throws IOException {
            restClient.close();
        }
    }
}
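Since AsyncClient is deliberately small, it can be stubbed without a network; a hedged sketch of a hypothetical canned-response client usable in tests:
```
import org.apache.http.HttpEntity;
import org.elasticsearch.index.reindex.remote.RemoteScrollableHitSource;

import java.io.ByteArrayInputStream;
import java.nio.charset.StandardCharsets;
import java.util.Map;

// Hypothetical test double; not part of the commit.
class CannedAsyncClient implements RemoteScrollableHitSource.AsyncClient {
    private final String json;

    CannedAsyncClient(String json) {
        this.json = json;
    }

    @Override
    public void performRequest(String method, String uri, Map<String, String> params, HttpEntity entity,
            RemoteScrollableHitSource.ResponseListener listener) {
        // Every request gets the same canned body; enough to drive the response
        // parsers through execute() without touching the wire.
        listener.onResponse(new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8)));
    }

    @Override
    public void close() {
        // Nothing to release.
    }
}
```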
@ -22,21 +22,15 @@ package org.elasticsearch.index.reindex;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.reindex.AbstractAsyncBulkIndexByScrollAction.OpType;
import org.elasticsearch.index.reindex.AbstractAsyncBulkIndexByScrollAction.RequestWrapper;
import org.elasticsearch.script.CompiledScript;
import org.elasticsearch.script.ExecutableScript;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.junit.Before;
import org.mockito.Matchers;

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

@ -63,9 +57,7 @@ public abstract class AbstractAsyncBulkIndexByScrollActionScriptTestCase<
    @SuppressWarnings("unchecked")
    protected <T extends ActionRequest<?>> T applyScript(Consumer<Map<String, Object>> scriptBody) {
        IndexRequest index = new IndexRequest("index", "type", "1").source(singletonMap("foo", "bar"));
        Map<String, SearchHitField> fields = new HashMap<>();
        InternalSearchHit doc = new InternalSearchHit(0, "id", new Text("type"), fields);
        doc.shardTarget(new SearchShardTarget("nodeid", new Index("index", "uuid"), 1));
        ScrollableHitSource.Hit doc = new ScrollableHitSource.BasicHit("test", "type", "id", 0);
        ExecutableScript executableScript = new SimpleExecutableScript(scriptBody);

        when(scriptService.executable(any(CompiledScript.class), Matchers.<Map<String, Object>>any()))

@ -20,16 +20,7 @@
package org.elasticsearch.index.reindex;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.internal.TTLFieldMapper;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHitField;

import static java.util.Collections.singletonList;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;

public abstract class AbstractAsyncBulkIndexbyScrollActionMetadataTestCase<

@ -37,25 +28,19 @@ public abstract class AbstractAsyncBulkIndexbyScrollActionMetadataTestCase<
        Response extends BulkIndexByScrollResponse>
        extends AbstractAsyncBulkIndexByScrollActionTestCase<Request, Response> {

    /**
     * Create a doc with some metadata.
     */
    protected InternalSearchHit doc(String field, Object value) {
        InternalSearchHit doc = new InternalSearchHit(0, "id", new Text("type"), singletonMap(field,
                new InternalSearchHitField(field, singletonList(value))));
        doc.shardTarget(new SearchShardTarget("node", new Index("index", "uuid"), 0));
        return doc;
    protected ScrollableHitSource.BasicHit doc() {
        return new ScrollableHitSource.BasicHit("index", "type", "id", 0);
    }

    public void testTimestampIsCopied() {
        IndexRequest index = new IndexRequest();
        action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(TimestampFieldMapper.NAME, 10L));
        action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc().setTimestamp(10L));
        assertEquals("10", index.timestamp());
    }

    public void testTTL() throws Exception {
        IndexRequest index = new IndexRequest();
        action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(TTLFieldMapper.NAME, 10L));
        action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc().setTTL(10L));
        assertEquals(timeValueMillis(10), index.ttl());
    }

@ -62,9 +62,10 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.index.reindex.ScrollableHitSource.Hit;
import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.internal.InternalSearchHit;
import org.elasticsearch.search.internal.InternalSearchHits;
import org.elasticsearch.search.internal.InternalSearchResponse;

@ -95,6 +96,7 @@ import static java.util.Collections.emptyList;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static java.util.Collections.singleton;
import static java.util.Collections.singletonList;
import static org.apache.lucene.util.TestUtil.randomSimpleString;
import static org.elasticsearch.action.bulk.BackoffPolicy.constantBackoff;
import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;

@ -103,7 +105,7 @@ import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.emptyCollectionOf;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasSize;

@ -155,7 +157,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
     * random scroll id so it is checked instead.
     */
    private String scrollId() {
        scrollId = randomSimpleString(random(), 1, 1000); // Empty strings get special behavior we don't want
        scrollId = randomSimpleString(random(), 1, 10); // Empty strings get special behavior we don't want
        return scrollId;
    }

@ -216,10 +218,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
        assertEquals(0, testTask.getStatus().getTotal());

        long total = randomIntBetween(0, Integer.MAX_VALUE);
        InternalSearchHits hits = new InternalSearchHits(null, total, 0);
        InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, false);
        new DummyAbstractAsyncBulkByScrollAction().onScrollResponse(timeValueSeconds(0), 0,
                new SearchResponse(searchResponse, scrollId(), 5, 4, randomLong(), null));
        ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), total, emptyList(), null);
        simulateScrollResponse(new DummyAbstractAsyncBulkByScrollAction(), timeValueSeconds(0), 0, response);
        assertEquals(total, testTask.getStatus().getTotal());
    }

@ -229,12 +229,10 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
    public void testScrollResponseBatchingBehavior() throws Exception {
        int maxBatches = randomIntBetween(0, 100);
        for (int batches = 1; batches < maxBatches; batches++) {
            InternalSearchHit hit = new InternalSearchHit(0, "id", new Text("type"), emptyMap());
            InternalSearchHits hits = new InternalSearchHits(new InternalSearchHit[] { hit }, 0, 0);
            InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, false);
            Hit hit = new ScrollableHitSource.BasicHit("index", "type", "id", 0);
            ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), 1, singletonList(hit), null);
            DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction();
            action.onScrollResponse(timeValueNanos(System.nanoTime()), 0,
                    new SearchResponse(searchResponse, scrollId(), 5, 4, randomLong(), null));
            simulateScrollResponse(action, timeValueNanos(System.nanoTime()), 0, response);

            // Use assert busy because the update happens on another thread
            final int expectedBatches = batches;

@ -314,16 +312,10 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
                return null;
            }
        };
        InternalSearchHits hits = new InternalSearchHits(null, 0, 0);
        InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, false);
        new DummyAbstractAsyncBulkByScrollAction().onScrollResponse(timeValueNanos(System.nanoTime()), 10,
                new SearchResponse(searchResponse, scrollId(), 5, 4, randomLong(), null));
        try {
            listener.get();
            fail("Expected a failure");
        } catch (ExecutionException e) {
            assertThat(e.getMessage(), equalTo("EsRejectedExecutionException[test]"));
        }
        ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), 0, emptyList(), null);
        simulateScrollResponse(new DummyAbstractAsyncBulkByScrollAction(), timeValueNanos(System.nanoTime()), 10, response);
        ExecutionException e = expectThrows(ExecutionException.class, () -> listener.get());
        assertThat(e.getMessage(), equalTo("EsRejectedExecutionException[test]"));
        assertThat(client.scrollsCleared, contains(scrollId));

        // When the task is rejected we don't increment the throttled timer

@ -335,12 +327,12 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
     * scroll request going down.
     */
    public void testShardFailuresAbortRequest() throws Exception {
        ShardSearchFailure shardFailure = new ShardSearchFailure(new RuntimeException("test"));
        InternalSearchResponse internalResponse = new InternalSearchResponse(null, null, null, null, false, null);
        new DummyAbstractAsyncBulkByScrollAction().onScrollResponse(timeValueNanos(System.nanoTime()), 0,
                new SearchResponse(internalResponse, scrollId(), 5, 4, randomLong(), new ShardSearchFailure[] { shardFailure }));
        SearchFailure shardFailure = new SearchFailure(new RuntimeException("test"));
        ScrollableHitSource.Response scrollResponse = new ScrollableHitSource.Response(false, singletonList(shardFailure), 0,
                emptyList(), null);
        simulateScrollResponse(new DummyAbstractAsyncBulkByScrollAction(), timeValueNanos(System.nanoTime()), 0, scrollResponse);
        BulkIndexByScrollResponse response = listener.get();
        assertThat(response.getIndexingFailures(), emptyCollectionOf(Failure.class));
        assertThat(response.getBulkFailures(), empty());
        assertThat(response.getSearchFailures(), contains(shardFailure));
        assertFalse(response.isTimedOut());
        assertNull(response.getReasonCancelled());

@ -351,12 +343,11 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
     * Mimics search timeouts.
     */
    public void testSearchTimeoutsAbortRequest() throws Exception {
        InternalSearchResponse internalResponse = new InternalSearchResponse(null, null, null, null, true, null);
        new DummyAbstractAsyncBulkByScrollAction().onScrollResponse(timeValueNanos(System.nanoTime()), 0,
                new SearchResponse(internalResponse, scrollId(), 5, 4, randomLong(), new ShardSearchFailure[0]));
        ScrollableHitSource.Response scrollResponse = new ScrollableHitSource.Response(true, emptyList(), 0, emptyList(), null);
        simulateScrollResponse(new DummyAbstractAsyncBulkByScrollAction(), timeValueNanos(System.nanoTime()), 0, scrollResponse);
        BulkIndexByScrollResponse response = listener.get();
        assertThat(response.getIndexingFailures(), emptyCollectionOf(Failure.class));
        assertThat(response.getSearchFailures(), emptyCollectionOf(ShardSearchFailure.class));
        assertThat(response.getBulkFailures(), empty());
        assertThat(response.getSearchFailures(), empty());
        assertTrue(response.isTimedOut());
        assertNull(response.getReasonCancelled());
        assertThat(client.scrollsCleared, contains(scrollId));

@ -371,8 +362,8 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
        BulkResponse bulkResponse = new BulkResponse(new BulkItemResponse[] {new BulkItemResponse(0, "index", failure)}, randomLong());
        action.onBulkResponse(timeValueNanos(System.nanoTime()), bulkResponse);
        BulkIndexByScrollResponse response = listener.get();
        assertThat(response.getIndexingFailures(), contains(failure));
        assertThat(response.getSearchFailures(), emptyCollectionOf(ShardSearchFailure.class));
        assertThat(response.getBulkFailures(), contains(failure));
        assertThat(response.getSearchFailures(), empty());
        assertNull(response.getReasonCancelled());
    }

@ -382,15 +373,13 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
    public void testListenerReceiveBuildBulkExceptions() throws Exception {
        DummyAbstractAsyncBulkByScrollAction action = new DummyAbstractAsyncBulkByScrollAction() {
            @Override
            protected BulkRequest buildBulk(Iterable<SearchHit> docs) {
            protected BulkRequest buildBulk(Iterable<? extends ScrollableHitSource.Hit> docs) {
                throw new RuntimeException("surprise");
            }
        };
        InternalSearchHit hit = new InternalSearchHit(0, "id", new Text("type"), emptyMap());
        InternalSearchHits hits = new InternalSearchHits(new InternalSearchHit[] {hit}, 0, 0);
        InternalSearchResponse internalResponse = new InternalSearchResponse(hits, null, null, null, false, false);
        SearchResponse searchResponse = new SearchResponse(internalResponse, scrollId(), 5, 4, randomLong(), null);
        action.onScrollResponse(timeValueNanos(System.nanoTime()), 0, searchResponse);
        Hit hit = new ScrollableHitSource.BasicHit("index", "type", "id", 0);
        ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), 1, singletonList(hit), null);
        simulateScrollResponse(action, timeValueNanos(System.nanoTime()), 0, response);
        ExecutionException e = expectThrows(ExecutionException.class, () -> listener.get());
        assertThat(e.getCause(), instanceOf(RuntimeException.class));
        assertThat(e.getCause().getMessage(), equalTo("surprise"));

@ -499,9 +488,9 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
        action.sendBulkRequest(timeValueNanos(System.nanoTime()), request);
        if (failWithRejection) {
            BulkIndexByScrollResponse response = listener.get();
            assertThat(response.getIndexingFailures(), hasSize(1));
            assertEquals(response.getIndexingFailures().get(0).getStatus(), RestStatus.TOO_MANY_REQUESTS);
            assertThat(response.getSearchFailures(), emptyCollectionOf(ShardSearchFailure.class));
            assertThat(response.getBulkFailures(), hasSize(1));
            assertEquals(response.getBulkFailures().get(0).getStatus(), RestStatus.TOO_MANY_REQUESTS);
            assertThat(response.getSearchFailures(), empty());
            assertNull(response.getReasonCancelled());
        } else {
            successLatch.await(10, TimeUnit.SECONDS);

@ -549,7 +538,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
        if (addDestinationIndexes) {
            action.addDestinationIndices(singleton("foo"));
        }
        action.startNormalTermination(emptyList(), emptyList(), false);
        action.refreshAndFinish(emptyList(), emptyList(), false);
        if (shouldRefresh) {
            assertArrayEquals(new String[] {"foo"}, client.lastRefreshRequest.get().indices());
        } else {

@ -563,7 +552,7 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {

    public void testCancelBeforeScrollResponse() throws Exception {
        // We bail so early we don't need to pass in a half way valid response.
        cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.onScrollResponse(timeValueNanos(System.nanoTime()), 1,
        cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> simulateScrollResponse(action, timeValueNanos(System.nanoTime()), 1,
                null));
    }

@ -582,10 +571,10 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
        cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.startNextScroll(timeValueNanos(System.nanoTime()), 0));
    }

    public void testCancelBeforeStartNormalTermination() throws Exception {
    public void testCancelBeforeRefreshAndFinish() throws Exception {
        // Refresh or not doesn't matter - we don't try to refresh.
        testRequest.setRefresh(usually());
        cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.startNormalTermination(emptyList(), emptyList(), false));
        cancelTaskCase((DummyAbstractAsyncBulkByScrollAction action) -> action.refreshAndFinish(emptyList(), emptyList(), false));
        assertNull("No refresh was attempted", client.lastRefreshRequest.get());
    }

@ -625,12 +614,10 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
            action.setScroll(scrollId());
        }
        long total = randomIntBetween(0, Integer.MAX_VALUE);
        InternalSearchHits hits = new InternalSearchHits(null, total, 0);
        InternalSearchResponse searchResponse = new InternalSearchResponse(hits, null, null, null, false, false);
        ScrollableHitSource.Response response = new ScrollableHitSource.Response(false, emptyList(), total, emptyList(), null);
        // Use a long delay here so the test will time out if the cancellation doesn't reschedule the throttled task
        SearchResponse scrollResponse = new SearchResponse(searchResponse, scrollId(), 5, 4, randomLong(), null);
        testTask.rethrottle(1);
        action.onScrollResponse(timeValueNanos(System.nanoTime()), 1000, scrollResponse);
        simulateScrollResponse(action, timeValueNanos(System.nanoTime()), 1000, response);

        // Now that we've got our cancel we'll just verify that it all came through all right
        assertEquals(reason, listener.get(10, TimeUnit.SECONDS).getReasonCancelled());

@ -656,23 +643,26 @@ public class AsyncBulkByScrollActionTests extends ESTestCase {
        }
    }

    /**
     * Simulate a scroll response by setting the scroll id and firing the onScrollResponse method.
     */
    private void simulateScrollResponse(DummyAbstractAsyncBulkByScrollAction action, TimeValue lastBatchTime, int lastBatchSize,
            ScrollableHitSource.Response response) {
        action.setScroll(scrollId());
        action.onScrollResponse(lastBatchTime, lastBatchSize, response);
    }

    private class DummyAbstractAsyncBulkByScrollAction
            extends AbstractAsyncBulkByScrollAction<DummyAbstractBulkByScrollRequest> {
        public DummyAbstractAsyncBulkByScrollAction() {
|
||||
super(testTask, logger, new ParentTaskAssigningClient(client, localNode, testTask), threadPool, testRequest, firstSearchRequest,
|
||||
listener);
|
||||
super(testTask, AsyncBulkByScrollActionTests.this.logger, new ParentTaskAssigningClient(client, localNode, testTask),
|
||||
AsyncBulkByScrollActionTests.this.threadPool, testRequest, listener);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected BulkRequest buildBulk(Iterable<SearchHit> docs) {
|
||||
protected BulkRequest buildBulk(Iterable<? extends ScrollableHitSource.Hit> docs) {
|
||||
return new BulkRequest();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected BulkIndexByScrollResponse buildResponse(TimeValue took, List<Failure> indexingFailures,
|
||||
List<ShardSearchFailure> searchFailures, boolean timedOut) {
|
||||
return new BulkIndexByScrollResponse(took, task.getStatus(), indexingFailures, searchFailures, timedOut);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@@ -124,7 +124,7 @@ public class BulkIndexByScrollResponseMatcher extends TypeSafeMatcher<BulkIndexB
                deletedMatcher.matches(item.getDeleted()) &&
                (batchesMatcher == null || batchesMatcher.matches(item.getBatches())) &&
                versionConflictsMatcher.matches(item.getVersionConflicts()) &&
-                failuresMatcher.matches(item.getIndexingFailures().size()) &&
+                failuresMatcher.matches(item.getBulkFailures().size()) &&
                reasonCancelledMatcher.matches(item.getReasonCancelled());
    }
@@ -141,7 +141,7 @@ public class CancelTests extends ReindexTestCase {
        // And check the status of the response
        BulkIndexByScrollResponse response = future.get();
        assertThat(response.getReasonCancelled(), equalTo("by user request"));
-        assertThat(response.getIndexingFailures(), emptyIterable());
+        assertThat(response.getBulkFailures(), emptyIterable());
        assertThat(response.getSearchFailures(), emptyIterable());

        flushAndRefresh(INDEX);
@@ -61,7 +61,7 @@ public class ReindexFailureTests extends ReindexTestCase {
        assertThat(response, matcher()
                .batches(1)
                .failures(both(greaterThan(0)).and(lessThanOrEqualTo(maximumNumberOfShards()))));
-        for (Failure failure: response.getIndexingFailures()) {
+        for (Failure failure: response.getBulkFailures()) {
            assertThat(failure.getMessage(), containsString("NumberFormatException[For input string: \"words words\"]"));
        }
    }

@@ -79,7 +79,7 @@ public class ReindexFailureTests extends ReindexTestCase {
        BulkIndexByScrollResponse response = copy.get();
        assertThat(response, matcher().batches(1).versionConflicts(1).failures(1).created(99));
-        for (Failure failure: response.getIndexingFailures()) {
+        for (Failure failure: response.getBulkFailures()) {
            assertThat(failure.getMessage(), containsString("VersionConflictEngineException[[test]["));
        }
    }
@@ -0,0 +1,92 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.index.reindex.remote.RemoteInfo;
import org.elasticsearch.test.ESTestCase;
import org.junit.Before;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.HashSet;
import java.util.Set;

import static java.util.Collections.emptySet;
import static org.elasticsearch.index.reindex.TransportReindexAction.checkRemoteWhitelist;

/**
 * Tests the reindex-from-remote whitelist of remotes.
 */
public class ReindexFromRemoteWhitelistTests extends ESTestCase {
    private TransportAddress localhost;

    @Before
    public void setupLocalhost() throws UnknownHostException {
        localhost = new InetSocketTransportAddress(InetAddress.getByAddress(new byte[] { 0x7f, 0x00, 0x00, 0x01 }), 9200);
    }

    public void testLocalRequestWithoutWhitelist() {
        checkRemoteWhitelist(emptySet(), null, localhostOrNone());
    }

    public void testLocalRequestWithWhitelist() {
        checkRemoteWhitelist(randomWhitelist(), null, localhostOrNone());
    }

    public void testWhitelistedRemote() {
        Set<String> whitelist = randomWhitelist();
        String[] inList = whitelist.iterator().next().split(":");
        String host = inList[0];
        int port = Integer.valueOf(inList[1]);
        checkRemoteWhitelist(whitelist, new RemoteInfo(randomAsciiOfLength(5), host, port, new BytesArray("test"), null, null),
                localhostOrNone());
    }

    public void testMyselfInWhitelistRemote() throws UnknownHostException {
        Set<String> whitelist = randomWhitelist();
        whitelist.add("myself");
        TransportAddress publishAddress = new InetSocketTransportAddress(InetAddress.getByAddress(new byte[] {0x7f,0x00,0x00,0x01}), 9200);
        checkRemoteWhitelist(whitelist, new RemoteInfo(randomAsciiOfLength(5), "127.0.0.1", 9200, new BytesArray("test"), null, null),
                publishAddress);
    }

    public void testUnwhitelistedRemote() {
        int port = between(1, Integer.MAX_VALUE);
        Exception e = expectThrows(IllegalArgumentException.class, () -> checkRemoteWhitelist(randomWhitelist(),
                new RemoteInfo(randomAsciiOfLength(5), "not in list", port, new BytesArray("test"), null, null), localhostOrNone()));
        assertEquals("[not in list:" + port + "] not whitelisted in reindex.remote.whitelist", e.getMessage());
    }

    private Set<String> randomWhitelist() {
        int size = between(1, 100);
        Set<String> set = new HashSet<>(size);
        while (set.size() < size) {
            set.add(randomAsciiOfLength(5) + ':' + between(1, Integer.MAX_VALUE));
        }
        return set;
    }

    private TransportAddress localhostOrNone() {
        return randomFrom(random(), null, localhost);
    }
}
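The tests above pin down the whitelist error message. As a rough sketch only (not the committed `checkRemoteWhitelist`, whose real signature also takes the node's publish address to support the `myself` entry), a `host:port` whitelist check of this shape would satisfy them; the class name and exact matching rules here are assumptions:

```
import java.util.Set;

// Sketch: assumed shape of the host:port whitelist check the tests exercise.
final class WhitelistCheckSketch {
    static void checkRemoteWhitelist(Set<String> whitelist, String host, int port) {
        if (host == null) {
            return; // local reindex, nothing to check
        }
        String candidate = host + ":" + port;
        if (whitelist.contains(candidate)) {
            return; // remote host is allowed
        }
        throw new IllegalArgumentException(
                "[" + candidate + "] not whitelisted in reindex.remote.whitelist");
    }
}
```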
@@ -21,7 +21,6 @@ package org.elasticsearch.index.reindex;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;

/**
 * Index-by-search test for ttl, timestamp, and routing.

@@ -29,7 +28,7 @@ import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;
public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMetadataTestCase<ReindexRequest, BulkIndexByScrollResponse> {
    public void testRoutingCopiedByDefault() throws Exception {
        IndexRequest index = new IndexRequest();
-        action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo"));
+        action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc().setRouting("foo"));
        assertEquals("foo", index.routing());
    }

@@ -37,7 +36,7 @@ public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMe
        TransportReindexAction.AsyncIndexBySearchAction action = action();
        action.mainRequest.getDestination().routing("keep");
        IndexRequest index = new IndexRequest();
-        action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo"));
+        action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc().setRouting("foo"));
        assertEquals("foo", index.routing());
    }

@@ -45,7 +44,7 @@ public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMe
        TransportReindexAction.AsyncIndexBySearchAction action = action();
        action.mainRequest.getDestination().routing("discard");
        IndexRequest index = new IndexRequest();
-        action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo"));
+        action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc().setRouting("foo"));
        assertEquals(null, index.routing());
    }

@@ -53,7 +52,7 @@ public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMe
        TransportReindexAction.AsyncIndexBySearchAction action = action();
        action.mainRequest.getDestination().routing("=cat");
        IndexRequest index = new IndexRequest();
-        action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo"));
+        action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc().setRouting("foo"));
        assertEquals("cat", index.routing());
    }

@@ -61,7 +60,7 @@ public class ReindexMetadataTests extends AbstractAsyncBulkIndexbyScrollActionMe
        TransportReindexAction.AsyncIndexBySearchAction action = action();
        action.mainRequest.getDestination().routing("==]");
        IndexRequest index = new IndexRequest();
-        action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo"));
+        action.copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc().setRouting("foo"));
        assertEquals("=]", index.routing());
    }
@@ -0,0 +1,60 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.index.reindex.remote.RemoteInfo;
import org.elasticsearch.test.ESTestCase;

import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;

/**
 * Tests some of the validation of {@linkplain ReindexRequest}. See reindex's rest tests for much more.
 */
public class ReindexRequestTests extends ESTestCase {
    public void testTimestampAndTtlNotAllowed() {
        ReindexRequest reindex = request();
        reindex.getDestination().ttl("1s").timestamp("now");
        ActionRequestValidationException e = reindex.validate();
        assertEquals("Validation Failed: 1: setting ttl on destination isn't supported. use scripts instead.;"
                + "2: setting timestamp on destination isn't supported. use scripts instead.;",
                e.getMessage());
    }

    public void testReindexFromRemoteDoesNotSupportSearchQuery() {
        ReindexRequest reindex = request();
        reindex.setRemoteInfo(new RemoteInfo(randomAsciiOfLength(5), randomAsciiOfLength(5), between(1, Integer.MAX_VALUE),
                new BytesArray("real_query"), null, null));
        reindex.getSearchRequest().source().query(matchAllQuery()); // Unsupported place to put query
        ActionRequestValidationException e = reindex.validate();
        assertEquals("Validation Failed: 1: reindex from remote sources should use RemoteInfo's query instead of source's query;",
                e.getMessage());
    }

    private ReindexRequest request() {
        ReindexRequest reindex = new ReindexRequest(new SearchRequest(), new IndexRequest());
        reindex.getSearchRequest().indices("source");
        reindex.getDestination().index("dest");
        return reindex;
    }
}
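For reference, the second test implies a validation clause of roughly this shape inside `ReindexRequest.validate()`. This is a sketch, not the committed code: the field name `remoteInfo` and the use of the `addValidationError` helper are assumptions.

```
// Sketch of the remote-query validation rule the test above pins down.
@Override
public ActionRequestValidationException validate() {
    ActionRequestValidationException e = super.validate();
    if (remoteInfo != null && getSearchRequest().source().query() != null) {
        e = addValidationError(
                "reindex from remote sources should use RemoteInfo's query instead of source's query", e);
    }
    return e;
}
```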
@@ -30,15 +30,20 @@ import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.reindex.remote.RemoteInfo;
import org.elasticsearch.test.ESTestCase;

import static org.hamcrest.Matchers.containsString;

/**
- * Tests that indexing from an index back into itself fails the request.
+ * Tests source and target index validation of reindex. Mostly that means testing that indexing from an index back into itself fails the
+ * request. Note that we can't catch you trying to remotely reindex from yourself into yourself. We actually assert here that reindexes
+ * from remote don't need to come from existing indexes. It'd be silly to fail requests if the source index didn't exist on the target
+ * cluster....
 */
-public class ReindexSameIndexTests extends ESTestCase {
+public class ReindexSourceTargetValidationTests extends ESTestCase {
    private static final ClusterState STATE = ClusterState.builder(new ClusterName("test")).metaData(MetaData.builder()
            .put(index("target", "target_alias", "target_multi"), true)
            .put(index("target2", "target_multi"), true)

@@ -50,7 +55,7 @@ public class ReindexSameIndexTests extends ESTestCase {
    private static final IndexNameExpressionResolver INDEX_NAME_EXPRESSION_RESOLVER = new IndexNameExpressionResolver(Settings.EMPTY);
    private static final AutoCreateIndex AUTO_CREATE_INDEX = new AutoCreateIndex(Settings.EMPTY, INDEX_NAME_EXPRESSION_RESOLVER);

-    public void testObviousCases() throws Exception {
+    public void testObviousCases() {
        fails("target", "target");
        fails("target", "foo", "bar", "target", "baz");
        fails("target", "foo", "bar", "target", "baz", "target");

@@ -58,7 +63,7 @@ public class ReindexSameIndexTests extends ESTestCase {
        succeeds("target", "source", "source2");
    }

-    public void testAliasesContainTarget() throws Exception {
+    public void testAliasesContainTarget() {
        fails("target", "target_alias");
        fails("target_alias", "target");
        fails("target", "foo", "bar", "target_alias", "baz");

@@ -71,31 +76,33 @@ public class ReindexSameIndexTests extends ESTestCase {
        succeeds("target", "source", "source2", "source_multi");
    }

-    public void testTargetIsAlias() throws Exception {
-        try {
-            succeeds("target_multi", "foo");
-            fail("Expected failure");
-        } catch (IllegalArgumentException e) {
-            assertThat(e.getMessage(), containsString("Alias [target_multi] has more than one indices associated with it [["));
-            // The index names can come in either order
-            assertThat(e.getMessage(), containsString("target"));
-            assertThat(e.getMessage(), containsString("target2"));
-        }
+    public void testTargetIsAlias() {
+        Exception e = expectThrows(IllegalArgumentException.class, () -> succeeds("target_multi", "foo"));
+        assertThat(e.getMessage(), containsString("Alias [target_multi] has more than one indices associated with it [["));
+        // The index names can come in either order
+        assertThat(e.getMessage(), containsString("target"));
+        assertThat(e.getMessage(), containsString("target2"));
    }

-    private void fails(String target, String... sources) throws Exception {
-        try {
-            succeeds(target, sources);
-            fail("Expected an exception");
-        } catch (ActionRequestValidationException e) {
-            assertThat(e.getMessage(),
-                    containsString("reindex cannot write into an index its reading from [target]"));
-        }
+    public void testRemoteInfoSkipsValidation() {
+        // The index doesn't have to exist
+        succeeds(new RemoteInfo(randomAsciiOfLength(5), "test", 9200, new BytesArray("test"), null, null), "does_not_exist", "target");
+        // And it doesn't matter if they are the same index. They are considered to be different because the remote one is, well, remote.
+        succeeds(new RemoteInfo(randomAsciiOfLength(5), "test", 9200, new BytesArray("test"), null, null), "target", "target");
    }

-    private void succeeds(String target, String... sources) throws Exception {
-        TransportReindexAction.validateAgainstAliases(new SearchRequest(sources), new IndexRequest(target), INDEX_NAME_EXPRESSION_RESOLVER,
-                AUTO_CREATE_INDEX, STATE);
+    private void fails(String target, String... sources) {
+        Exception e = expectThrows(ActionRequestValidationException.class, () -> succeeds(target, sources));
+        assertThat(e.getMessage(), containsString("reindex cannot write into an index its reading from [target]"));
    }

+    private void succeeds(String target, String... sources) {
+        succeeds(null, target, sources);
+    }

+    private void succeeds(RemoteInfo remoteInfo, String target, String... sources) {
+        TransportReindexAction.validateAgainstAliases(new SearchRequest(sources), new IndexRequest(target), remoteInfo,
+                INDEX_NAME_EXPRESSION_RESOLVER, AUTO_CREATE_INDEX, STATE);
    }

    private static IndexMetaData index(String name, String... aliases) {
@@ -0,0 +1,121 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex;

import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.reindex.RestReindexAction.ReindexParseContext;
import org.elasticsearch.index.reindex.remote.RemoteInfo;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class RestReindexActionTests extends ESTestCase {
    public void testBuildRemoteInfoNoRemote() throws IOException {
        assertNull(RestReindexAction.buildRemoteInfo(new HashMap<>()));
    }

    public void testBuildRemoteInfoFullyLoaded() throws IOException {
        Map<String, Object> remote = new HashMap<>();
        remote.put("host", "https://example.com:9200");
        remote.put("username", "testuser");
        remote.put("password", "testpass");

        Map<String, Object> query = new HashMap<>();
        query.put("a", "b");

        Map<String, Object> source = new HashMap<>();
        source.put("remote", remote);
        source.put("query", query);

        RemoteInfo remoteInfo = RestReindexAction.buildRemoteInfo(source);
        assertEquals("https", remoteInfo.getScheme());
        assertEquals("example.com", remoteInfo.getHost());
        assertEquals(9200, remoteInfo.getPort());
        assertEquals("{\n  \"a\" : \"b\"\n}", remoteInfo.getQuery().utf8ToString());
        assertEquals("testuser", remoteInfo.getUsername());
        assertEquals("testpass", remoteInfo.getPassword());
    }

    public void testBuildRemoteInfoWithoutAllParts() throws IOException {
        expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("example.com"));
        expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("example.com:9200"));
        expectThrows(IllegalArgumentException.class, () -> buildRemoteInfoHostTestCase("http://example.com"));
    }

    public void testBuildRemoteInfoWithAllParts() throws IOException {
        RemoteInfo info = buildRemoteInfoHostTestCase("http://example.com:9200");
        assertEquals("http", info.getScheme());
        assertEquals("example.com", info.getHost());
        assertEquals(9200, info.getPort());

        info = buildRemoteInfoHostTestCase("https://other.example.com:9201");
        assertEquals("https", info.getScheme());
        assertEquals("other.example.com", info.getHost());
        assertEquals(9201, info.getPort());
    }

    public void testReindexFromRemoteRequestParsing() throws IOException {
        BytesReference request;
        try (XContentBuilder b = JsonXContent.contentBuilder()) {
            b.startObject(); {
                b.startObject("source"); {
                    b.startObject("remote"); {
                        b.field("host", "http://localhost:9200");
                    }
                    b.endObject();
                    b.field("index", "source");
                }
                b.endObject();
                b.startObject("dest"); {
                    b.field("index", "dest");
                }
                b.endObject();
            }
            b.endObject();
            request = b.bytes();
        }
        try (XContentParser p = JsonXContent.jsonXContent.createParser(request)) {
            ReindexRequest r = new ReindexRequest(new SearchRequest(), new IndexRequest());
            RestReindexAction.PARSER.parse(p, r,
                    new ReindexParseContext(new IndicesQueriesRegistry(), null, null, ParseFieldMatcher.STRICT));
            assertEquals("localhost", r.getRemoteInfo().getHost());
            assertArrayEquals(new String[] {"source"}, r.getSearchRequest().indices());
        }
    }

    private RemoteInfo buildRemoteInfoHostTestCase(String hostInRest) throws IOException {
        Map<String, Object> remote = new HashMap<>();
        remote.put("host", hostInRest);

        Map<String, Object> source = new HashMap<>();
        source.put("remote", remote);

        return RestReindexAction.buildRemoteInfo(source);
    }
}
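The `testBuildRemoteInfoWithoutAllParts` cases imply that `buildRemoteInfo` insists on a full `scheme://host:port` triple. A sketch of that parse follows; this is a hypothetical helper illustrating the contract, not the committed code in `RestReindexAction`:

```
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical parser showing the scheme://host:port contract the tests imply.
final class RemoteHostParseSketch {
    private static final Pattern HOST = Pattern.compile("(?<scheme>[^:]+)://(?<host>[^:]+):(?<port>\\d+)");

    static String[] parse(String hostField) {
        Matcher m = HOST.matcher(hostField);
        if (false == m.matches()) {
            // "example.com", "example.com:9200" and "http://example.com" all land here
            throw new IllegalArgumentException(
                    "[host] must be of the form [scheme]://[host]:[port] but was [" + hostField + "]");
        }
        return new String[] { m.group("scheme"), m.group("host"), m.group("port") };
    }
}
```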
@@ -20,13 +20,18 @@
package org.elasticsearch.index.reindex;

import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.bulk.Retry;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.index.reindex.remote.RemoteInfo;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.threadpool.ThreadPool;

@@ -39,6 +44,7 @@ import java.util.List;
import java.util.concurrent.CyclicBarrier;

import static org.elasticsearch.index.reindex.ReindexTestCase.matcher;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.hasSize;

@@ -68,6 +74,10 @@ public class RetryTests extends ESSingleNodeTestCase {
        // Use queues of size 1 because size 0 is broken and because search requests need the queue to function
        settings.put("thread_pool.bulk.queue_size", 1);
        settings.put("thread_pool.search.queue_size", 1);
+        // Enable http so we can test retries on reindex from remote. In this case the "remote" cluster is just this cluster.
+        settings.put(NetworkModule.HTTP_ENABLED.getKey(), true);
+        // Whitelist reindexing from the http host we're going to use
+        settings.put(TransportReindexAction.REMOTE_CLUSTER_WHITELIST.getKey(), "myself");
        return settings.build();
    }

@@ -97,6 +107,15 @@ public class RetryTests extends ESSingleNodeTestCase {
                matcher().created(DOC_COUNT));
    }

+    public void testReindexFromRemote() throws Exception {
+        NodeInfo nodeInfo = client().admin().cluster().prepareNodesInfo().get().getNodes().get(0);
+        TransportAddress address = nodeInfo.getHttp().getAddress().publishAddress();
+        RemoteInfo remote = new RemoteInfo("http", address.getHost(), address.getPort(), new BytesArray("{\"match_all\":{}}"), null, null);
+        ReindexRequestBuilder request = ReindexAction.INSTANCE.newRequestBuilder(client()).source("source").destination("dest")
+                .setRemoteInfo(remote);
+        testCase(ReindexAction.NAME, request, matcher().created(DOC_COUNT));
+    }

    public void testUpdateByQuery() throws Exception {
        testCase(UpdateByQueryAction.NAME, UpdateByQueryAction.INSTANCE.newRequestBuilder(client()).source("source"),
                matcher().updated(DOC_COUNT));

@@ -118,34 +137,41 @@ public class RetryTests extends ESSingleNodeTestCase {
        logger.info("Starting request");
        ListenableActionFuture<BulkIndexByScrollResponse> responseListener = request.execute();

-        logger.info("Waiting for search rejections on the initial search");
-        assertBusy(() -> assertThat(taskStatus(action).getSearchRetries(), greaterThan(0L)));
+        try {
+            logger.info("Waiting for search rejections on the initial search");
+            assertBusy(() -> assertThat(taskStatus(action).getSearchRetries(), greaterThan(0L)));

-        logger.info("Blocking bulk and unblocking search so we start to get bulk rejections");
-        CyclicBarrier bulkBlock = blockExecutor(ThreadPool.Names.BULK);
-        initialSearchBlock.await();
+            logger.info("Blocking bulk and unblocking search so we start to get bulk rejections");
+            CyclicBarrier bulkBlock = blockExecutor(ThreadPool.Names.BULK);
+            initialSearchBlock.await();

-        logger.info("Waiting for bulk rejections");
-        assertBusy(() -> assertThat(taskStatus(action).getBulkRetries(), greaterThan(0L)));
+            logger.info("Waiting for bulk rejections");
+            assertBusy(() -> assertThat(taskStatus(action).getBulkRetries(), greaterThan(0L)));

-        // Keep a copy of the current number of search rejections so we can assert that we get more when we block the scroll
-        long initialSearchRejections = taskStatus(action).getSearchRetries();
+            // Keep a copy of the current number of search rejections so we can assert that we get more when we block the scroll
+            long initialSearchRejections = taskStatus(action).getSearchRetries();

-        logger.info("Blocking search and unblocking bulk so we should get search rejections for the scroll");
-        CyclicBarrier scrollBlock = blockExecutor(ThreadPool.Names.SEARCH);
-        bulkBlock.await();
+            logger.info("Blocking search and unblocking bulk so we should get search rejections for the scroll");
+            CyclicBarrier scrollBlock = blockExecutor(ThreadPool.Names.SEARCH);
+            bulkBlock.await();

-        logger.info("Waiting for search rejections for the scroll");
-        assertBusy(() -> assertThat(taskStatus(action).getSearchRetries(), greaterThan(initialSearchRejections)));
+            logger.info("Waiting for search rejections for the scroll");
+            assertBusy(() -> assertThat(taskStatus(action).getSearchRetries(), greaterThan(initialSearchRejections)));

-        logger.info("Unblocking the scroll");
-        scrollBlock.await();
+            logger.info("Unblocking the scroll");
+            scrollBlock.await();

-        logger.info("Waiting for the request to finish");
-        BulkIndexByScrollResponse response = responseListener.get();
-        assertThat(response, matcher);
-        assertThat(response.getBulkRetries(), greaterThan(0L));
-        assertThat(response.getSearchRetries(), greaterThan(initialSearchRejections));
+            logger.info("Waiting for the request to finish");
+            BulkIndexByScrollResponse response = responseListener.get();
+            assertThat(response, matcher);
+            assertThat(response.getBulkRetries(), greaterThan(0L));
+            assertThat(response.getSearchRetries(), greaterThan(initialSearchRejections));
+        } finally {
+            // Fetch the response just in case we blew up half way through. This will make sure the failure is thrown up to the top level.
+            BulkIndexByScrollResponse response = responseListener.get();
+            assertThat(response.getSearchFailures(), empty());
+            assertThat(response.getBulkFailures(), empty());
+        }
    }

    /**
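`blockExecutor` is defined further down this file; roughly, it parks the pool's single thread on a barrier so that the size-1 queue configured above fills and follow-up requests get rejected. A sketch of the idea, with the error handling and the `threadPool` field as assumptions rather than the committed helper:

```
// Sketch: occupy the pool's only thread so new work overflows the size-1
// queue and is rejected. CyclicBarrier is already imported in this file.
private CyclicBarrier blockExecutor(String name) throws Exception {
    CyclicBarrier barrier = new CyclicBarrier(2);
    threadPool.executor(name).execute(() -> {
        try {
            barrier.await(); // tell the test we have the thread
            barrier.await(); // hold it until the test unblocks us
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    });
    barrier.await(); // wait until the thread is actually parked
    return barrier;
}
```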
@@ -19,20 +19,21 @@
package org.elasticsearch.index.reindex;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.WriteConsistencyLevel;
import org.elasticsearch.action.bulk.BulkItemResponse.Failure;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.index.reindex.ScrollableHitSource.SearchFailure;
import org.elasticsearch.index.reindex.remote.RemoteInfo;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptService.ScriptType;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.test.ESTestCase;

@@ -56,11 +57,28 @@ public class RoundTripTests extends ESTestCase {
        randomRequest(reindex);
        reindex.getDestination().version(randomFrom(Versions.MATCH_ANY, Versions.MATCH_DELETED, 12L, 1L, 123124L, 12L));
        reindex.getDestination().index("test");
+        if (randomBoolean()) {
+            int port = between(1, Integer.MAX_VALUE);
+            BytesReference query = new BytesArray(randomAsciiOfLength(5));
+            String username = randomBoolean() ? randomAsciiOfLength(5) : null;
+            String password = username != null && randomBoolean() ? randomAsciiOfLength(5) : null;
+            reindex.setRemoteInfo(new RemoteInfo(randomAsciiOfLength(5), randomAsciiOfLength(5), port, query, username, password));
+        }
        ReindexRequest tripped = new ReindexRequest();
        roundTrip(reindex, tripped);
        assertRequestEquals(reindex, tripped);
        assertEquals(reindex.getDestination().version(), tripped.getDestination().version());
        assertEquals(reindex.getDestination().index(), tripped.getDestination().index());
+        if (reindex.getRemoteInfo() == null) {
+            assertNull(tripped.getRemoteInfo());
+        } else {
+            assertNotNull(tripped.getRemoteInfo());
+            assertEquals(reindex.getRemoteInfo().getScheme(), tripped.getRemoteInfo().getScheme());
+            assertEquals(reindex.getRemoteInfo().getHost(), tripped.getRemoteInfo().getHost());
+            assertEquals(reindex.getRemoteInfo().getQuery(), tripped.getRemoteInfo().getQuery());
+            assertEquals(reindex.getRemoteInfo().getUsername(), tripped.getRemoteInfo().getUsername());
+            assertEquals(reindex.getRemoteInfo().getPassword(), tripped.getRemoteInfo().getPassword());
+        }
    }

    public void testUpdateByQueryRequest() throws IOException {

@@ -149,13 +167,19 @@ public class RoundTripTests extends ESTestCase {
                randomSimpleString(random()), new IllegalArgumentException("test")));
    }

-    private List<ShardSearchFailure> randomSearchFailures() {
-        if (usually()) {
+    private List<SearchFailure> randomSearchFailures() {
+        if (randomBoolean()) {
            return emptyList();
        }
-        Index index = new Index(randomSimpleString(random()), "uuid");
-        return singletonList(new ShardSearchFailure(randomSimpleString(random()),
-                new SearchShardTarget(randomSimpleString(random()), index, randomInt()), randomFrom(RestStatus.values())));
+        String index = null;
+        Integer shardId = null;
+        String nodeId = null;
+        if (randomBoolean()) {
+            index = randomAsciiOfLength(5);
+            shardId = randomInt();
+            nodeId = usually() ? randomAsciiOfLength(5) : null;
+        }
+        return singletonList(new SearchFailure(new ElasticsearchException("foo"), index, shardId, nodeId));
    }

    private void roundTrip(Streamable example, Streamable empty) throws IOException {

@@ -182,10 +206,10 @@ public class RoundTripTests extends ESTestCase {
    private void assertResponseEquals(BulkIndexByScrollResponse expected, BulkIndexByScrollResponse actual) {
        assertEquals(expected.getTook(), actual.getTook());
        assertTaskStatusEquals(expected.getStatus(), actual.getStatus());
-        assertEquals(expected.getIndexingFailures().size(), actual.getIndexingFailures().size());
-        for (int i = 0; i < expected.getIndexingFailures().size(); i++) {
-            Failure expectedFailure = expected.getIndexingFailures().get(i);
-            Failure actualFailure = actual.getIndexingFailures().get(i);
+        assertEquals(expected.getBulkFailures().size(), actual.getBulkFailures().size());
+        for (int i = 0; i < expected.getBulkFailures().size(); i++) {
+            Failure expectedFailure = expected.getBulkFailures().get(i);
+            Failure actualFailure = actual.getBulkFailures().get(i);
            assertEquals(expectedFailure.getIndex(), actualFailure.getIndex());
            assertEquals(expectedFailure.getType(), actualFailure.getType());
            assertEquals(expectedFailure.getId(), actualFailure.getId());

@@ -194,13 +218,15 @@ public class RoundTripTests extends ESTestCase {
        }
        assertEquals(expected.getSearchFailures().size(), actual.getSearchFailures().size());
        for (int i = 0; i < expected.getSearchFailures().size(); i++) {
-            ShardSearchFailure expectedFailure = expected.getSearchFailures().get(i);
-            ShardSearchFailure actualFailure = actual.getSearchFailures().get(i);
-            assertEquals(expectedFailure.shard(), actualFailure.shard());
-            assertEquals(expectedFailure.status(), actualFailure.status());
-            // We can't use getCause because throwable doesn't implement equals
-            assertEquals(expectedFailure.reason(), actualFailure.reason());
+            SearchFailure expectedFailure = expected.getSearchFailures().get(i);
+            SearchFailure actualFailure = actual.getSearchFailures().get(i);
+            assertEquals(expectedFailure.getIndex(), actualFailure.getIndex());
+            assertEquals(expectedFailure.getShardId(), actualFailure.getShardId());
+            assertEquals(expectedFailure.getNodeId(), actualFailure.getNodeId());
+            assertEquals(expectedFailure.getReason().getClass(), actualFailure.getReason().getClass());
+            assertEquals(expectedFailure.getReason().getMessage(), actualFailure.getReason().getMessage());
        }
    }

    private void assertTaskStatusEquals(BulkByScrollTask.Status expected, BulkByScrollTask.Status actual) {
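The `roundTrip` helper whose signature closes the hunk above follows the usual Streamable pattern: write the example to a `BytesStreamOutput`, then read back into the empty instance. A sketch, with the stream-wrapping detail hedged since it varies across versions:

```
// Sketch of the serialize-then-deserialize helper; how the written bytes are
// wrapped back into a StreamInput is version-dependent, StreamInput.wrap(...)
// is one spelling of it.
private void roundTrip(Streamable example, Streamable empty) throws IOException {
    BytesStreamOutput out = new BytesStreamOutput();
    example.writeTo(out);
    empty.readFrom(StreamInput.wrap(out.bytes()));
}
```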
@@ -21,13 +21,12 @@ package org.elasticsearch.index.reindex;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.index.mapper.internal.RoutingFieldMapper;

public class UpdateByQueryMetadataTests
        extends AbstractAsyncBulkIndexbyScrollActionMetadataTestCase<UpdateByQueryRequest, BulkIndexByScrollResponse> {
    public void testRoutingIsCopied() throws Exception {
        IndexRequest index = new IndexRequest();
-        action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc(RoutingFieldMapper.NAME, "foo"));
+        action().copyMetadata(AbstractAsyncBulkIndexByScrollAction.wrap(index), doc().setRouting("foo"));
        assertEquals("foo", index.routing());
    }
@@ -19,6 +19,7 @@
package org.elasticsearch.index.reindex;

+import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.script.ScriptService;

import java.util.Date;

@@ -48,7 +49,7 @@ public class UpdateByQueryWithScriptTests
    @Override
    protected UpdateByQueryRequest request() {
-        return new UpdateByQueryRequest();
+        return new UpdateByQueryRequest(new SearchRequest());
    }

    @Override
@@ -0,0 +1,36 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex.remote;

import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.test.ESTestCase;

public class RemoteInfoTests extends ESTestCase {
    public void testToString() {
        RemoteInfo info = new RemoteInfo("http", "testhost", 12344, new BytesArray("testquery"), null, null);
        assertEquals("host=testhost port=12344 query=testquery", info.toString());
        info = new RemoteInfo("http", "testhost", 12344, new BytesArray("testquery"), "testuser", null);
        assertEquals("host=testhost port=12344 query=testquery username=testuser", info.toString());
        info = new RemoteInfo("http", "testhost", 12344, new BytesArray("testquery"), "testuser", "testpass");
        assertEquals("host=testhost port=12344 query=testquery username=testuser password=<<>>", info.toString());
        info = new RemoteInfo("https", "testhost", 12344, new BytesArray("testquery"), "testuser", "testpass");
        assertEquals("scheme=https host=testhost port=12344 query=testquery username=testuser password=<<>>", info.toString());
    }
}
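These assertions document two details of `RemoteInfo.toString()`: the default `http` scheme is left out of the string, and the password is masked rather than printed. A sketch of a `toString` that satisfies them, with the field names as assumptions:

```
// Sketch: omits the default scheme and never prints the real password.
@Override
public String toString() {
    StringBuilder b = new StringBuilder();
    if (false == "http".equals(scheme)) {
        // http is the default, so it is not included in the string
        b.append("scheme=").append(scheme).append(' ');
    }
    b.append("host=").append(host).append(" port=").append(port);
    b.append(" query=").append(query.utf8ToString());
    if (username != null) {
        b.append(" username=").append(username);
    }
    if (password != null) {
        b.append(" password=<<>>");
    }
    return b.toString();
}
```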
@@ -0,0 +1,181 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex.remote;

import org.apache.http.HttpEntity;
import org.apache.http.entity.ContentType;
import org.elasticsearch.Version;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.Map;

import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearchEntity;
import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearchParams;
import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.initialSearchPath;
import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.scrollEntity;
import static org.elasticsearch.index.reindex.remote.RemoteRequestBuilders.scrollParams;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.either;
import static org.hamcrest.Matchers.hasEntry;
import static org.hamcrest.Matchers.hasKey;
import static org.hamcrest.Matchers.not;

public class RemoteRequestBuildersTests extends ESTestCase {
    public void testIntialSearchPath() {
        SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());

        assertEquals("/_search", initialSearchPath(searchRequest));
        searchRequest.indices("a");
        searchRequest.types("b");
        assertEquals("/a/b/_search", initialSearchPath(searchRequest));
        searchRequest.indices("a", "b");
        searchRequest.types("c", "d");
        assertEquals("/a,b/c,d/_search", initialSearchPath(searchRequest));

        searchRequest.indices("cat,");
        expectBadStartRequest(searchRequest, "Index", ",", "cat,");
        searchRequest.indices("cat,", "dog");
        expectBadStartRequest(searchRequest, "Index", ",", "cat,");
        searchRequest.indices("dog", "cat,");
        expectBadStartRequest(searchRequest, "Index", ",", "cat,");
        searchRequest.indices("cat/");
        expectBadStartRequest(searchRequest, "Index", "/", "cat/");
        searchRequest.indices("cat/", "dog");
        expectBadStartRequest(searchRequest, "Index", "/", "cat/");
        searchRequest.indices("dog", "cat/");
        expectBadStartRequest(searchRequest, "Index", "/", "cat/");

        searchRequest.indices("ok");
        searchRequest.types("cat,");
        expectBadStartRequest(searchRequest, "Type", ",", "cat,");
        searchRequest.types("cat,", "dog");
        expectBadStartRequest(searchRequest, "Type", ",", "cat,");
        searchRequest.types("dog", "cat,");
        expectBadStartRequest(searchRequest, "Type", ",", "cat,");
        searchRequest.types("cat/");
        expectBadStartRequest(searchRequest, "Type", "/", "cat/");
        searchRequest.types("cat/", "dog");
        expectBadStartRequest(searchRequest, "Type", "/", "cat/");
        searchRequest.types("dog", "cat/");
        expectBadStartRequest(searchRequest, "Type", "/", "cat/");
    }

    private void expectBadStartRequest(SearchRequest searchRequest, String type, String bad, String failed) {
        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> initialSearchPath(searchRequest));
        assertEquals(type + " containing [" + bad + "] not supported but got [" + failed + "]", e.getMessage());
    }

    public void testInitialSearchParamsSort() {
        SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());

        // Test sort:_doc for versions that support it.
        Version remoteVersion = Version.fromId(between(Version.V_2_1_0_ID, Version.CURRENT.id));
        searchRequest.source().sort("_doc");
        assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("sorts", "_doc:asc"));

        // Test search_type scan for versions that don't support sort:_doc.
        remoteVersion = Version.fromId(between(0, Version.V_2_1_0_ID - 1));
        assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("search_type", "scan"));

        // Test sorting by some field. Version doesn't matter.
        remoteVersion = Version.fromId(between(0, Version.CURRENT.id));
        searchRequest.source().sorts().clear();
        searchRequest.source().sort("foo");
        assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("sorts", "foo:asc"));
    }

    public void testInitialSearchParamsFields() {
        SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());

        // Test request without any fields
        Version remoteVersion = Version.fromId(between(0, Version.CURRENT.id));
        assertThat(initialSearchParams(searchRequest, remoteVersion),
                not(either(hasKey("stored_fields")).or(hasKey("fields"))));

        // Setup some fields for the next two tests
        searchRequest.source().storedField("_source").storedField("_id");

        // Test stored_fields for versions that support it
        remoteVersion = Version.fromId(between(Version.V_5_0_0_alpha4_ID, Version.CURRENT.id));
        assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("stored_fields", "_source,_id"));

        // Test fields for versions that support it
        remoteVersion = Version.fromId(between(0, Version.V_5_0_0_alpha4_ID - 1));
        assertThat(initialSearchParams(searchRequest, remoteVersion), hasEntry("fields", "_source,_id"));
    }

    public void testInitialSearchParamsMisc() {
        SearchRequest searchRequest = new SearchRequest().source(new SearchSourceBuilder());
        Version remoteVersion = Version.fromId(between(0, Version.CURRENT.id));

        TimeValue scroll = null;
        if (randomBoolean()) {
            scroll = TimeValue.parseTimeValue(randomPositiveTimeValue(), "test");
            searchRequest.scroll(scroll);
        }
        int size = between(0, Integer.MAX_VALUE);
        searchRequest.source().size(size);
        Boolean fetchVersion = null;
        if (randomBoolean()) {
            fetchVersion = randomBoolean();
            searchRequest.source().version(fetchVersion);
        }

        Map<String, String> params = initialSearchParams(searchRequest, remoteVersion);

        assertThat(params, scroll == null ? not(hasKey("scroll")) : hasEntry("scroll", scroll.toString()));
        assertThat(params, hasEntry("size", Integer.toString(size)));
        assertThat(params, fetchVersion == null || fetchVersion == true ? hasEntry("version", null) : not(hasEntry("version", null)));
    }

    public void testInitialSearchEntity() throws IOException {
        String query = "{\"match_all\":{}}";
        HttpEntity entity = initialSearchEntity(new BytesArray(query));
        assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue());
        assertEquals("{\"query\":" + query + "}",
                Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)));

        // Invalid XContent fails
        RuntimeException e = expectThrows(RuntimeException.class, () -> initialSearchEntity(new BytesArray("{}, \"trailing\": {}")));
        assertThat(e.getCause().getMessage(), containsString("Unexpected character (',' (code 44))"));
        e = expectThrows(RuntimeException.class, () -> initialSearchEntity(new BytesArray("{")));
        assertThat(e.getCause().getMessage(), containsString("Unexpected end-of-input"));
    }

    public void testScrollParams() {
        TimeValue scroll = TimeValue.parseTimeValue(randomPositiveTimeValue(), "test");
        assertThat(scrollParams(scroll), hasEntry("scroll", scroll.toString()));
    }

    public void testScrollEntity() throws IOException {
        String scroll = randomAsciiOfLength(30);
        HttpEntity entity = scrollEntity(scroll);
        assertEquals(ContentType.TEXT_PLAIN.toString(), entity.getContentType().getValue());
        assertEquals(scroll, Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)));
    }
}
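`testIntialSearchPath` fixes the URL layout: comma-joined indices and types, each segment rejected if it contains `,` or `/`. A sketch of path building along those lines (not the committed `RemoteRequestBuilders.initialSearchPath`):

```
// Sketch of the /index/type/_search path contract the test pins down.
final class SearchPathSketch {
    static String searchPath(String[] indices, String[] types) {
        StringBuilder path = new StringBuilder("/");
        append(path, "Index", indices);
        append(path, "Type", types);
        return path.append("_search").toString();
    }

    private static void append(StringBuilder path, String what, String[] names) {
        if (names.length == 0) {
            return;
        }
        for (String name : names) {
            for (String bad : new String[] {",", "/"}) {
                if (name.contains(bad)) {
                    // matches the message asserted by expectBadStartRequest
                    throw new IllegalArgumentException(
                            what + " containing [" + bad + "] not supported but got [" + name + "]");
                }
            }
        }
        path.append(String.join(",", names)).append('/');
    }
}
```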
@ -0,0 +1,381 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.reindex.remote;

import org.apache.http.HttpEntity;
import org.elasticsearch.Version;
import org.elasticsearch.action.bulk.BackoffPolicy;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.index.reindex.ScrollableHitSource.Response;
import org.elasticsearch.index.reindex.remote.RemoteScrollableHitSource.ResponseListener;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.junit.After;
import org.junit.Before;

import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.concurrent.Executor;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Consumer;

import static org.elasticsearch.common.unit.TimeValue.timeValueMillis;
import static org.elasticsearch.common.unit.TimeValue.timeValueMinutes;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.instanceOf;

public class RemoteScrollableHitSourceTests extends ESTestCase {
    private final String FAKE_SCROLL_ID = "DnF1ZXJ5VGhlbkZldGNoBQAAAfakescroll";
    private int retries;
    private ThreadPool threadPool;
    private SearchRequest searchRequest;
    private int retriesAllowed;

    @Before
    @Override
    public void setUp() throws Exception {
        super.setUp();
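        // Neuter the thread pool so everything in these tests runs synchronously on the
        // calling thread: generic executors run tasks inline and scheduled retries fire
        // immediately instead of waiting out the backoff delay.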
        threadPool = new TestThreadPool(getTestName()) {
            @Override
            public Executor executor(String name) {
                return r -> r.run();
            }

            @Override
            public ScheduledFuture<?> schedule(TimeValue delay, String name, Runnable command) {
                command.run();
                return null;
            }
        };
        retries = 0;
        searchRequest = new SearchRequest();
        searchRequest.scroll(timeValueMinutes(5));
        searchRequest.source(new SearchSourceBuilder().size(10).version(true).sort("_doc").size(123));
        retriesAllowed = 0;
    }

    @After
    @Override
    public void tearDown() throws Exception {
        super.tearDown();
        terminate(threadPool);
    }

    public void testLookupRemoteVersion() throws Exception {
        sourceWithMockedRemoteCall(false, "main/0_20_5.json").lookupRemoteVersion(v -> assertEquals(Version.fromString("0.20.5"), v));
        sourceWithMockedRemoteCall(false, "main/0_90_13.json").lookupRemoteVersion(v -> assertEquals(Version.fromString("0.90.13"), v));
        sourceWithMockedRemoteCall(false, "main/1_7_5.json").lookupRemoteVersion(v -> assertEquals(Version.fromString("1.7.5"), v));
        sourceWithMockedRemoteCall(false, "main/2_3_3.json").lookupRemoteVersion(v -> assertEquals(Version.V_2_3_3, v));
        sourceWithMockedRemoteCall(false, "main/5_0_0_alpha_3.json").lookupRemoteVersion(v -> assertEquals(Version.V_5_0_0_alpha3, v));
    }

    public void testParseStartOk() throws Exception {
        AtomicBoolean called = new AtomicBoolean();
        sourceWithMockedRemoteCall("start_ok.json").doStart(r -> {
            assertFalse(r.isTimedOut());
            assertEquals(FAKE_SCROLL_ID, r.getScrollId());
            assertEquals(4, r.getTotalHits());
            assertThat(r.getFailures(), empty());
            assertThat(r.getHits(), hasSize(1));
            assertEquals("test", r.getHits().get(0).getIndex());
            assertEquals("test", r.getHits().get(0).getType());
            assertEquals("AVToMiC250DjIiBO3yJ_", r.getHits().get(0).getId());
            assertEquals("{\"test\":\"test2\"}", r.getHits().get(0).getSource().utf8ToString());
            assertNull(r.getHits().get(0).getTTL());
            assertNull(r.getHits().get(0).getTimestamp());
            assertNull(r.getHits().get(0).getRouting());
            called.set(true);
        });
        assertTrue(called.get());
    }

    public void testParseScrollOk() throws Exception {
        AtomicBoolean called = new AtomicBoolean();
        sourceWithMockedRemoteCall("scroll_ok.json").doStartNextScroll("", timeValueMillis(0), r -> {
            assertFalse(r.isTimedOut());
            assertEquals(FAKE_SCROLL_ID, r.getScrollId());
            assertEquals(4, r.getTotalHits());
            assertThat(r.getFailures(), empty());
            assertThat(r.getHits(), hasSize(1));
            assertEquals("test", r.getHits().get(0).getIndex());
            assertEquals("test", r.getHits().get(0).getType());
            assertEquals("AVToMiDL50DjIiBO3yKA", r.getHits().get(0).getId());
            assertEquals("{\"test\":\"test3\"}", r.getHits().get(0).getSource().utf8ToString());
            assertNull(r.getHits().get(0).getTTL());
            assertNull(r.getHits().get(0).getTimestamp());
            assertNull(r.getHits().get(0).getRouting());
            called.set(true);
        });
        assertTrue(called.get());
    }

    /**
     * Test for parsing _ttl, _timestamp, and _routing.
     */
    public void testParseScrollFullyLoaded() throws Exception {
        AtomicBoolean called = new AtomicBoolean();
        sourceWithMockedRemoteCall("scroll_fully_loaded.json").doStartNextScroll("", timeValueMillis(0), r -> {
            assertEquals("AVToMiDL50DjIiBO3yKA", r.getHits().get(0).getId());
            assertEquals("{\"test\":\"test3\"}", r.getHits().get(0).getSource().utf8ToString());
            assertEquals((Long) 1234L, r.getHits().get(0).getTTL());
            assertEquals((Long) 123444L, r.getHits().get(0).getTimestamp());
            assertEquals("testrouting", r.getHits().get(0).getRouting());
            assertEquals("testparent", r.getHits().get(0).getParent());
            called.set(true);
        });
        assertTrue(called.get());
    }

    /**
     * Versions of Elasticsearch before 2.1.0 don't support sort:_doc and instead need to use search_type=scan. Scan doesn't return
     * documents on the first iteration, but reindex doesn't like that, so we jump straight to the next iteration.
     */
    public void testScanJumpStart() throws Exception {
        AtomicBoolean called = new AtomicBoolean();
        sourceWithMockedRemoteCall("start_scan.json", "scroll_ok.json").doStart(r -> {
            assertFalse(r.isTimedOut());
            assertEquals(FAKE_SCROLL_ID, r.getScrollId());
            assertEquals(4, r.getTotalHits());
            assertThat(r.getFailures(), empty());
            assertThat(r.getHits(), hasSize(1));
            assertEquals("test", r.getHits().get(0).getIndex());
            assertEquals("test", r.getHits().get(0).getType());
            assertEquals("AVToMiDL50DjIiBO3yKA", r.getHits().get(0).getId());
            assertEquals("{\"test\":\"test3\"}", r.getHits().get(0).getSource().utf8ToString());
            assertNull(r.getHits().get(0).getTTL());
            assertNull(r.getHits().get(0).getTimestamp());
            assertNull(r.getHits().get(0).getRouting());
            called.set(true);
        });
        assertTrue(called.get());
    }

    public void testParseRejection() throws Exception {
        // The rejection comes through in the handler because the mocked http response isn't marked as an error
        AtomicBoolean called = new AtomicBoolean();
        // Handling a scroll rejection is the same as handling a search rejection so we reuse the verification code
        Consumer<Response> checkResponse = r -> {
            assertFalse(r.isTimedOut());
            assertEquals(FAKE_SCROLL_ID, r.getScrollId());
            assertEquals(4, r.getTotalHits());
            assertThat(r.getFailures(), hasSize(1));
            assertEquals("test", r.getFailures().get(0).getIndex());
            assertEquals((Integer) 0, r.getFailures().get(0).getShardId());
            assertEquals("87A7NvevQxSrEwMbtRCecg", r.getFailures().get(0).getNodeId());
            assertThat(r.getFailures().get(0).getReason(), instanceOf(EsRejectedExecutionException.class));
            assertEquals("rejected execution of org.elasticsearch.transport.TransportService$5@52d06af2 on "
                    + "EsThreadPoolExecutor[search, queue capacity = 1000, org.elasticsearch.common.util.concurrent."
                    + "EsThreadPoolExecutor@778ea553[Running, pool size = 7, active threads = 7, queued tasks = 1000, "
                    + "completed tasks = 4182]]", r.getFailures().get(0).getReason().getMessage());
            assertThat(r.getHits(), hasSize(1));
            assertEquals("test", r.getHits().get(0).getIndex());
            assertEquals("test", r.getHits().get(0).getType());
            assertEquals("AVToMiC250DjIiBO3yJ_", r.getHits().get(0).getId());
            assertEquals("{\"test\":\"test1\"}", r.getHits().get(0).getSource().utf8ToString());
            called.set(true);
        };
        sourceWithMockedRemoteCall("rejection.json").doStart(checkResponse);
        assertTrue(called.get());
        called.set(false);
        sourceWithMockedRemoteCall("rejection.json").doStartNextScroll("scroll", timeValueMillis(0), checkResponse);
        assertTrue(called.get());
    }

    public void testParseFailureWithStatus() throws Exception {
        // The rejection comes through in the handler because the mocked http response isn't marked as an error
        AtomicBoolean called = new AtomicBoolean();
        // Handling a scroll rejection is the same as handling a search rejection so we reuse the verification code
        Consumer<Response> checkResponse = r -> {
            assertFalse(r.isTimedOut());
            assertEquals(FAKE_SCROLL_ID, r.getScrollId());
            assertEquals(10000, r.getTotalHits());
            assertThat(r.getFailures(), hasSize(1));
            assertEquals(null, r.getFailures().get(0).getIndex());
            assertEquals(null, r.getFailures().get(0).getShardId());
            assertEquals(null, r.getFailures().get(0).getNodeId());
            assertThat(r.getFailures().get(0).getReason(), instanceOf(RuntimeException.class));
            assertEquals("Unknown remote exception with reason=[SearchContextMissingException[No search context found for id [82]]]",
                    r.getFailures().get(0).getReason().getMessage());
            assertThat(r.getHits(), hasSize(1));
            assertEquals("test", r.getHits().get(0).getIndex());
            assertEquals("test", r.getHits().get(0).getType());
            assertEquals("10000", r.getHits().get(0).getId());
            assertEquals("{\"test\":\"test10000\"}", r.getHits().get(0).getSource().utf8ToString());
            called.set(true);
        };
        sourceWithMockedRemoteCall("failure_with_status.json").doStart(checkResponse);
        assertTrue(called.get());
        called.set(false);
        sourceWithMockedRemoteCall("failure_with_status.json").doStartNextScroll("scroll", timeValueMillis(0), checkResponse);
        assertTrue(called.get());
    }

    public void testParseRequestFailure() throws Exception {
        AtomicBoolean called = new AtomicBoolean();
        Consumer<Response> checkResponse = r -> {
            assertFalse(r.isTimedOut());
            assertNull(r.getScrollId());
            assertEquals(0, r.getTotalHits());
            assertThat(r.getFailures(), hasSize(1));
            assertThat(r.getFailures().get(0).getReason(), instanceOf(ParsingException.class));
            ParsingException failure = (ParsingException) r.getFailures().get(0).getReason();
            assertEquals("Unknown key for a VALUE_STRING in [invalid].", failure.getMessage());
            assertEquals(2, failure.getLineNumber());
            assertEquals(14, failure.getColumnNumber());
            called.set(true);
        };
        sourceWithMockedRemoteCall("request_failure.json").doStart(checkResponse);
        assertTrue(called.get());
        called.set(false);
        sourceWithMockedRemoteCall("request_failure.json").doStartNextScroll("scroll", timeValueMillis(0), checkResponse);
        assertTrue(called.get());
    }

    public void testRetryAndSucceed() throws Exception {
        AtomicBoolean called = new AtomicBoolean();
        Consumer<Response> checkResponse = r -> {
            assertThat(r.getFailures(), hasSize(0));
            called.set(true);
        };
        retriesAllowed = between(1, Integer.MAX_VALUE);
        sourceWithMockedRemoteCall("fail:rejection.json", "start_ok.json").doStart(checkResponse);
        assertTrue(called.get());
        assertEquals(1, retries);
        retries = 0;
        called.set(false);
        sourceWithMockedRemoteCall("fail:rejection.json", "scroll_ok.json").doStartNextScroll("scroll", timeValueMillis(0),
                checkResponse);
        assertTrue(called.get());
        assertEquals(1, retries);
    }

    public void testRetryUntilYouRunOutOfTries() throws Exception {
        AtomicBoolean called = new AtomicBoolean();
        Consumer<Response> checkResponse = r -> called.set(true);
        retriesAllowed = between(0, 10);
        String[] paths = new String[retriesAllowed + 2];
        for (int i = 0; i < retriesAllowed + 2; i++) {
            paths[i] = "fail:rejection.json";
        }
        RuntimeException e = expectThrows(RuntimeException.class, () -> sourceWithMockedRemoteCall(paths).doStart(checkResponse));
        assertEquals("failed", e.getMessage());
        assertFalse(called.get());
        assertEquals(retriesAllowed, retries);
        retries = 0;
        e = expectThrows(RuntimeException.class,
                () -> sourceWithMockedRemoteCall(paths).doStartNextScroll("scroll", timeValueMillis(0), checkResponse));
        assertEquals("failed", e.getMessage());
        assertFalse(called.get());
        assertEquals(retriesAllowed, retries);
    }

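    // For reference, the retry counting asserted above is driven by BackoffPolicy.wrap
    // from this commit: every delay taken from the wrapped iterator fires the callback
    // exactly once. A minimal sketch of that contract (illustrative, not test code):
    //
    //   AtomicInteger count = new AtomicInteger();
    //   Iterator<TimeValue> it = BackoffPolicy.wrap(
    //           BackoffPolicy.constantBackoff(timeValueMillis(0), 3),
    //           count::incrementAndGet).iterator();
    //   while (it.hasNext()) {
    //       it.next();            // one callback per backoff taken
    //   }
    //   assert count.get() == 3;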
    private RemoteScrollableHitSource sourceWithMockedRemoteCall(String... paths) throws Exception {
        return sourceWithMockedRemoteCall(true, paths);
    }

    /**
     * Creates a hit source that doesn't make remote requests and instead returns data from some files. Responses are also delivered
     * synchronously rather than asynchronously.
     */
    private RemoteScrollableHitSource sourceWithMockedRemoteCall(boolean mockRemoteVersion, String... paths) throws Exception {
        URL[] resources = new URL[paths.length];
        for (int i = 0; i < paths.length; i++) {
            resources[i] = Thread.currentThread().getContextClassLoader().getResource("responses/" + paths[i].replace("fail:", ""));
            if (resources[i] == null) {
                throw new IllegalArgumentException("Couldn't find [" + paths[i] + "]");
            }
        }
        RemoteScrollableHitSource.AsyncClient client = new RemoteScrollableHitSource.AsyncClient() {
            int responseCount = 0;
            @Override
            public void performRequest(String method, String uri, Map<String, String> params, HttpEntity entity,
                    ResponseListener listener) {
                try {
                    URL resource = resources[responseCount];
                    String path = paths[responseCount++];
                    InputStream stream = resource.openStream();
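                    // Paths prefixed with "fail:" simulate a failed call instead of returning
                    // the canned response; "fail:rejection.json" is reported as retryable,
                    // anything else as a hard failure.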
                    if (path.startsWith("fail:")) {
                        String body = Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8));
                        if (path.equals("fail:rejection.json")) {
                            listener.onRetryableFailure(new RuntimeException(body));
                        } else {
                            listener.onFailure(new RuntimeException(body));
                        }
                    } else {
                        listener.onResponse(stream);
                    }
                } catch (IOException e) {
                    listener.onFailure(e);
                }
            }

            @Override
            public void close() throws IOException {
            }
        };
        TestRemoteScrollableHitSource hitSource = new TestRemoteScrollableHitSource(client) {
            @Override
            void lookupRemoteVersion(Consumer<Version> onVersion) {
                if (mockRemoteVersion) {
                    onVersion.accept(Version.CURRENT);
                } else {
                    super.lookupRemoteVersion(onVersion);
                }
            }
        };
        if (mockRemoteVersion) {
            hitSource.remoteVersion = Version.CURRENT;
        }
        return hitSource;
    }

    private BackoffPolicy backoff() {
        return BackoffPolicy.constantBackoff(timeValueMillis(0), retriesAllowed);
    }

    private void countRetry() {
        retries += 1;
    }

    private void failRequest(Throwable t) {
        throw new RuntimeException("failed", t);
    }

    private class TestRemoteScrollableHitSource extends RemoteScrollableHitSource {
        public TestRemoteScrollableHitSource(RemoteScrollableHitSource.AsyncClient client) {
            super(RemoteScrollableHitSourceTests.this.logger, backoff(), RemoteScrollableHitSourceTests.this.threadPool,
                    RemoteScrollableHitSourceTests.this::countRetry, RemoteScrollableHitSourceTests.this::failRequest, client,
                    new BytesArray("{}"), RemoteScrollableHitSourceTests.this.searchRequest);
        }
    }
}

@ -0,0 +1,28 @@ responses/failure_with_status.json
{
  "_scroll_id": "DnF1ZXJ5VGhlbkZldGNoBQAAAfakescroll",
  "took": 3,
  "timed_out": false,
  "_shards": {
    "total": 5,
    "successful": 4,
    "failed": 1,
    "failures": [ {
      "status": 404,
      "reason": "SearchContextMissingException[No search context found for id [82]]"
    } ]
  },
  "hits": {
    "total": 10000,
    "max_score": 0.0,
    "hits": [ {
      "_index": "test",
      "_type": "test",
      "_id": "10000",
      "_version": 1,
      "_score": 0.0,
      "_source": {
        "test": "test10000"
      }
    } ]
  }
}

@ -0,0 +1,10 @@ responses/main/0_20_5.json
{
  "ok" : true,
  "status" : 200,
  "name" : "Techno",
  "version" : {
    "number" : "0.20.5",
    "snapshot_build" : false
  },
  "tagline" : "You Know, for Search"
}

@ -0,0 +1,13 @@ responses/main/0_90_13.json
{
  "ok" : true,
  "status" : 200,
  "name" : "Mogul of the Mystic Mountain",
  "version" : {
    "number" : "0.90.13",
    "build_hash" : "249c9c5e06765c9e929e92b1d235e1ba4dc679fa",
    "build_timestamp" : "2014-03-25T15:27:12Z",
    "build_snapshot" : false,
    "lucene_version" : "4.6"
  },
  "tagline" : "You Know, for Search"
}

@ -0,0 +1,13 @@ responses/main/1_7_5.json
{
  "status" : 200,
  "name" : "Robert Kelly",
  "cluster_name" : "elasticsearch",
  "version" : {
    "number" : "1.7.5",
    "build_hash" : "00f95f4ffca6de89d68b7ccaf80d148f1f70e4d4",
    "build_timestamp" : "2016-02-02T09:55:30Z",
    "build_snapshot" : false,
    "lucene_version" : "4.10.4"
  },
  "tagline" : "You Know, for Search"
}

@ -0,0 +1,12 @@ responses/main/2_3_3.json
{
  "name" : "Ezekiel Stane",
  "cluster_name" : "elasticsearch",
  "version" : {
    "number" : "2.3.3",
    "build_hash" : "218bdf10790eef486ff2c41a3df5cfa32dadcfde",
    "build_timestamp" : "2016-05-17T15:40:04Z",
    "build_snapshot" : false,
    "lucene_version" : "5.5.0"
  },
  "tagline" : "You Know, for Search"
}

@ -0,0 +1,12 @@ responses/main/5_0_0_alpha_3.json
{
  "name" : "Paibo",
  "cluster_name" : "distribution_run",
  "version" : {
    "number" : "5.0.0-alpha3",
    "build_hash" : "42e092f",
    "build_date" : "2016-05-26T16:55:45.405Z",
    "build_snapshot" : true,
    "lucene_version" : "6.0.0"
  },
  "tagline" : "You Know, for Search"
}

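The `main/*.json` fixtures above are captured `GET /` responses from real releases; `lookupRemoteVersion` only needs the `version.number` field out of them. A rough sketch of that extraction (a hypothetical helper operating on an already-parsed body, not the production parser, which goes through the XContent infrastructure):
```
import java.util.Map;
import org.elasticsearch.Version;

class MainResponseSketch {
    // Hypothetical, simplified version extraction from a parsed "GET /" response.
    static Version versionFrom(Map<String, Object> mainResponse) {
        @SuppressWarnings("unchecked")
        Map<String, Object> version = (Map<String, Object>) mainResponse.get("version");
        return Version.fromString((String) version.get("number"));
    }
}
```
Note how the shape of the body drifted across releases (`snapshot_build` vs `build_snapshot`, `status` present or absent), which is why only `version.number` is relied on.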
@ -0,0 +1,34 @@ responses/rejection.json
{
  "_scroll_id" : "DnF1ZXJ5VGhlbkZldGNoBQAAAfakescroll",
  "took" : 6,
  "timed_out" : false,
  "_shards" : {
    "total" : 5,
    "successful" : 4,
    "failed" : 1,
    "failures" : [ {
      "shard" : 0,
      "index" : "test",
      "node" : "87A7NvevQxSrEwMbtRCecg",
      "reason" : {
        "type" : "es_rejected_execution_exception",
        "reason" : "rejected execution of org.elasticsearch.transport.TransportService$5@52d06af2 on EsThreadPoolExecutor[search, queue capacity = 1000, org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor@778ea553[Running, pool size = 7, active threads = 7, queued tasks = 1000, completed tasks = 4182]]"
      }
    } ]
  },
  "hits" : {
    "total" : 4,
    "max_score" : null,
    "hits" : [ {
      "_index" : "test",
      "_type" : "test",
      "_id" : "AVToMiC250DjIiBO3yJ_",
      "_version" : 1,
      "_score" : null,
      "_source" : {
        "test" : "test1"
      },
      "sort" : [ 0 ]
    } ]
  }
}

@ -0,0 +1,15 @@ responses/request_failure.json
{
  "error" : {
    "root_cause" : [ {
      "type" : "parsing_exception",
      "reason" : "Unknown key for a VALUE_STRING in [invalid].",
      "line" : 2,
      "col" : 14
    } ],
    "type" : "parsing_exception",
    "reason" : "Unknown key for a VALUE_STRING in [invalid].",
    "line" : 2,
    "col" : 14
  },
  "status" : 400
}

@ -0,0 +1,30 @@ responses/scroll_fully_loaded.json
{
  "_scroll_id" : "DnF1ZXJ5VGhlbkZldGNoBQAAAfakescroll",
  "took" : 3,
  "timed_out" : false,
  "terminated_early" : true,
  "_shards" : {
    "total" : 5,
    "successful" : 5,
    "failed" : 0
  },
  "hits" : {
    "total" : 4,
    "max_score" : null,
    "hits" : [ {
      "_index" : "test",
      "_type" : "test",
      "_id" : "AVToMiDL50DjIiBO3yKA",
      "_version" : 1,
      "_score" : null,
      "_source" : {
        "test" : "test3"
      },
      "sort" : [ 0 ],
      "_routing": "testrouting",
      "_parent": "testparent",
      "_ttl" : 1234,
      "_timestamp": 123444
    } ]
  }
}

@ -0,0 +1,26 @@ responses/scroll_ok.json
{
  "_scroll_id" : "DnF1ZXJ5VGhlbkZldGNoBQAAAfakescroll",
  "took" : 3,
  "timed_out" : false,
  "terminated_early" : true,
  "_shards" : {
    "total" : 5,
    "successful" : 5,
    "failed" : 0
  },
  "hits" : {
    "total" : 4,
    "max_score" : null,
    "hits" : [ {
      "_index" : "test",
      "_type" : "test",
      "_id" : "AVToMiDL50DjIiBO3yKA",
      "_version" : 1,
      "_score" : null,
      "_source" : {
        "test" : "test3"
      },
      "sort" : [ 0 ]
    } ]
  }
}

@ -0,0 +1,25 @@ responses/start_ok.json
{
  "_scroll_id" : "DnF1ZXJ5VGhlbkZldGNoBQAAAfakescroll",
  "took" : 6,
  "timed_out" : false,
  "_shards" : {
    "total" : 5,
    "successful" : 5,
    "failed" : 0
  },
  "hits" : {
    "total" : 4,
    "max_score" : null,
    "hits" : [ {
      "_index" : "test",
      "_type" : "test",
      "_id" : "AVToMiC250DjIiBO3yJ_",
      "_version" : 1,
      "_score" : null,
      "_source" : {
        "test" : "test2"
      },
      "sort" : [ 0 ]
    } ]
  }
}

@ -0,0 +1,15 @@ responses/start_scan.json
{
  "_scroll_id" : "c2Nhbjs1OzQ0Ojd5aUZoUm5hU2lDZ3ZvUHMzMXdGQ1E7NDU6N3lpRmhSbmFTaUNndm9QczMxd0ZDUTs0Mjo3eWlGaFJuYVNpQ2d2b1BzMzF3RkNROzQzOjd5aUZoUm5hU2lDZ3ZvUHMzMXdGQ1E7NDE6N3lpRmhSbmFTaUNndm9QczMxd0ZDUTsxO3RvdGFsX2hpdHM6MTAwMDA7",
  "took" : 13,
  "timed_out" : false,
  "_shards" : {
    "total" : 5,
    "successful" : 5,
    "failed" : 0
  },
  "hits" : {
    "total" : 10000,
    "max_score" : 0.0,
    "hits" : [ ]
  }
}

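Incidentally, the scan `_scroll_id` above is not opaque noise: old-style scroll ids are base64-encoded descriptors, and decoding this one yields a `scan;5;...` payload listing per-shard scroll handles plus `total_hits:10000;`, matching the empty first page the jump-start test works around. A quick way to look inside one (plain JDK, nothing Elasticsearch-specific):
```
import java.nio.charset.StandardCharsets;
import java.util.Base64;

class DecodeScanScrollId {
    public static void main(String[] args) {
        String scrollId = "c2Nhbjs1OzQ0Ojd5aUZoUm5hU2lDZ3ZvUHMzMXdGQ1E7NDU6N3lpRmhSbmFTaUNndm9QczMxd0ZDUTs0Mjo3eWlGaFJuYVNpQ2d2b1BzMzF3RkNROzQzOjd5aUZoUm5hU2lDZ3ZvUHMzMXdGQ1E7NDE6N3lpRmhSbmFTaUNndm9QczMxd0ZDUTsxO3RvdGFsX2hpdHM6MTAwMDA7";
        // Prints "scan;5;44:...;total_hits:10000;" - shard handles plus the total hit count.
        System.out.println(new String(Base64.getDecoder().decode(scrollId), StandardCharsets.UTF_8));
    }
}
```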
@ -224,3 +224,46 @@
          stored_fields: [_id]
        dest:
          index: dest

---
"unwhitelisted remote host fails":
  - do:
      catch: /\[badremote:9200\] not whitelisted in reindex.remote.whitelist/
      reindex:
        body:
          source:
            remote:
              host: http://badremote:9200
            index: test
          dest:
            index: dest

---
"badly formatted remote host fails":
  - do:
      catch: /\[host\] must be of the form \[scheme\].//\[host\].\[port\]/
      reindex:
        body:
          source:
            remote:
              host: badremote
              weird: stuff
              badkey: is bad
            index: test
          dest:
            index: dest

---
"junk in remote fails":
  - do:
      catch: /Unsupported fields in \[remote\]. \[weird,badkey\]/
      reindex:
        body:
          source:
            remote:
              host: http://okremote:9200
              weird: stuff
              badkey: is bad
            index: test
          dest:
            index: dest

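For context, the whitelist the first of these tests trips is a plain node-level setting. A hedged sketch of what enabling a remote host for a test node might look like (the setting name and the host:port form come from the error message asserted above; `Settings.builder()` is the standard Elasticsearch settings API):
```
import org.elasticsearch.common.settings.Settings;

class WhitelistSettingSketch {
    // Hypothetical node settings allowing reindex-from-remote against one host;
    // without an entry like this, the request fails as in the test above.
    static Settings nodeSettings() {
        return Settings.builder()
                .put("reindex.remote.whitelist", "otherhost:9200")
                .build();
    }
}
```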
@ -0,0 +1,207 @@
---
"Basic reindex from remote":
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
        refresh: true

  # Fetch the http host. We use the host of the master because we know there will always be a master.
  - do:
      cluster.state: {}
  - set: { master_node: master }
  - do:
      nodes.info:
        metric: [ http ]
  - is_true: nodes.$master.http.publish_address
  - set: {nodes.$master.http.publish_address: host}
  - do:
      reindex:
        refresh: true
        body:
          source:
            remote:
              host: http://${host}
            index: source
          dest:
            index: dest
  - match: {created: 1}
  - match: {updated: 0}
  - match: {version_conflicts: 0}
  - match: {batches: 1}
  - match: {failures: []}
  - match: {throttled_millis: 0}
  - gte: { took: 0 }
  - is_false: task
  - is_false: deleted

  - do:
      search:
        index: dest
        body:
          query:
            match:
              text: test
  - match: {hits.total: 1}

---
"Reindex from remote with query":
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      index:
        index: source
        type: foo
        id: 2
        body: { "text": "test2" }
  - do:
      indices.refresh: {}

  # Fetch the http host. We use the host of the master because we know there will always be a master.
  - do:
      cluster.state: {}
  - set: { master_node: master }
  - do:
      nodes.info:
        metric: [ http ]
  - is_true: nodes.$master.http.publish_address
  - set: {nodes.$master.http.publish_address: host}
  - do:
      reindex:
        refresh: true
        body:
          source:
            remote:
              host: http://${host}
            index: source
            query:
              match:
                text: test2
          dest:
            index: dest
  - match: {created: 1}

  - do:
      search:
        index: dest
        body:
          query:
            match_all: {}
  - match: {hits.total: 1}

---
"Reindex from remote with routing":
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
        routing: foo
        refresh: true

  # Fetch the http host. We use the host of the master because we know there will always be a master.
  - do:
      cluster.state: {}
  - set: { master_node: master }
  - do:
      nodes.info:
        metric: [ http ]
  - is_true: nodes.$master.http.publish_address
  - set: {nodes.$master.http.publish_address: host}
  - do:
      reindex:
        refresh: true
        body:
          source:
            remote:
              host: http://${host}
            index: source
          dest:
            index: dest
  - match: {created: 1}

  - do:
      search:
        index: dest
        routing: foo
        body:
          query:
            match:
              text: test
  - match: {hits.total: 1}

---
"Reindex from remote with parent/child":
  - do:
      indices.create:
        index: source
        body:
          mappings:
            foo: {}
            bar:
              _parent:
                type: foo
  - do:
      indices.create:
        index: dest
        body:
          mappings:
            foo: {}
            bar:
              _parent:
                type: foo
  - do:
      index:
        index: source
        type: foo
        id: 1
        body: { "text": "test" }
  - do:
      index:
        index: source
        type: bar
        id: 1
        parent: 1
        body: { "text": "test2" }
  - do:
      indices.refresh: {}

  # Fetch the http host. We use the host of the master because we know there will always be a master.
  - do:
      cluster.state: {}
  - set: { master_node: master }
  - do:
      nodes.info:
        metric: [ http ]
  - is_true: nodes.$master.http.publish_address
  - set: {nodes.$master.http.publish_address: host}
  - do:
      reindex:
        refresh: true
        body:
          source:
            remote:
              host: http://${host}
            index: source
          dest:
            index: dest
  - match: {created: 2}

  - do:
      search:
        index: dest
        body:
          query:
            has_parent:
              parent_type: foo
              query:
                match:
                  text: test
  - match: {hits.total: 1}

@ -71,7 +71,7 @@ public class Stash implements ToXContent {
     * as arguments for following requests (e.g. scroll_id)
     */
    public boolean containsStashedValue(Object key) {
-       if (key == null) {
+       if (key == null || false == key instanceof CharSequence) {
            return false;
        }
        String stashKey = key.toString();

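This one-line guard exists because the REST test runner now pushes whole request bodies (maps, lists, numbers, nulls) through the stash looking for embedded `${...}` references like the `http://${host}` values in the YAML tests above, and only character sequences can carry such a reference. A minimal illustration of the new behavior (assuming only the guard shown above):
```
import static java.util.Collections.singletonMap;

import org.elasticsearch.test.rest.Stash;

class ContainsStashedValueSketch {
    static void demo() {
        Stash stash = new Stash();
        // Non-CharSequence keys now short-circuit to false before key.toString() runs:
        assert false == stash.containsStashedValue(singletonMap("a", "b"));
        assert false == stash.containsStashedValue(null);
    }
}
```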
@ -0,0 +1,46 @@ StashTests.java
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.test.rest.test;

import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.rest.Stash;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import static java.util.Collections.singletonMap;

public class StashTests extends ESTestCase {
    public void testReplaceStashedValuesEmbeddedStashKey() throws IOException {
        Stash stash = new Stash();
        stash.stashValue("stashed", "bar");

        Map<String, Object> expected = new HashMap<>();
        expected.put("key", singletonMap("a", "foobar"));
        Map<String, Object> map = new HashMap<>();
        Map<String, Object> map2 = new HashMap<>();
        map2.put("a", "foo${stashed}");
        map.put("key", map2);

        Map<String, Object> actual = stash.replaceStashedValues(map);
        assertEquals(expected, actual);
    }
}
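The embedded-key expansion this test exercises boils down to placeholder substitution inside string values; a rough standalone equivalent of what `replaceStashedValues` does to each string (illustrative only, not the Stash implementation):
```
import java.util.Map;

class StashSubstitutionSketch {
    // Hypothetical stand-in for the ${key} expansion applied to string values.
    static String expand(String value, Map<String, Object> stash) {
        for (Map.Entry<String, Object> e : stash.entrySet()) {
            value = value.replace("${" + e.getKey() + "}", String.valueOf(e.getValue()));
        }
        return value;
    }
    // expand("foo${stashed}", singletonMap("stashed", "bar")) -> "foobar"
}
```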