Merge branch 'master' into ccr
* master:
  QA: Add xpack tests to rolling upgrade (#30795)
  Modify state of VerifyRepositoryResponse for bwc (#30762)
  Reduce CLI scripts to one-liners on Windows (#30772)
  Simplify number of shards setting (#30783)
  Replace Request#setHeaders with addHeader (#30588)
  [TEST] remove endless wait in RestClientTests (#30776)
  [Docs] Fix script-fields snippet execution (#30693)
  Upgrade to Lucene-7.4.0-snapshot-cc2ee23050 (#30778)
  [DOCS] Add SAML configuration information (#30548)
  [DOCS] Remove X-Pack references from SQL CLI (#30694)
  Make http pipelining support mandatory (#30695)
  [Docs] Fix typo in circuit breaker docs (#29659)
  [Feature] Adding a char_group tokenizer (#24186)
  [Docs] Fix broken cross link in documentation
  Test: wait for netty threads in a JUnit ClassRule (#30763)
  Increase the maximum number of filters that may be in the cache. (#30655)
  [Security] Include an empty json object in an json array when FLS filters out all fields (#30709)
  [TEST] Wait for CS to be fully applied in testDeleteCreateInOneBulk
  Add more yaml tests for get alias API (#29513)
  Ignore empty completion input (#30713)
  [DOCS] fixed incorrect default
  [ML] Filter undefined job groups from update calendar actions (#30757)
  Fix docs failure on language analyzers (#30722)
  [Docs] Fix inconsistencies in snapshot/restore doc (#30480)
  Enable installing plugins from snapshots.elastic.co (#30765)
  Remove fedora 26, add 28 (#30683)
  Accept Gradle build scan agreement (#30645)
  Remove logging from elasticsearch-nio jar (#30761)
  Add Delete Repository High Level REST API (#30666)
commit fb48b029e8
@@ -379,7 +379,7 @@ You can choose which boxes to test by setting the `-Pvagrant.boxes` project prop
 the valid options for this property are:

 * `sample` - The default, only chooses ubuntu-1404 and centos-7
-* List of box names, comma separated (e.g. `oel-7,fedora-26`) - Chooses exactly the boxes listed.
+* List of box names, comma separated (e.g. `oel-7,fedora-28`) - Chooses exactly the boxes listed.
 * `linux-all` - All linux boxes.
 * `windows-all` - All Windows boxes. If there are any Windows boxes which do not
   have images available when this value is provided, the build will fail.
@@ -406,8 +406,8 @@ These are the linux flavors supported, all of which we provide images for
 * debian-9 aka stretch, the current debian stable distribution
 * centos-6
 * centos-7
-* fedora-26
 * fedora-27
+* fedora-28
 * oel-6 aka Oracle Enterprise Linux 6
 * oel-7 aka Oracle Enterprise Linux 7
 * sles-12
@@ -97,18 +97,18 @@ Vagrant.configure(2) do |config|
       rpm_common config, box
     end
   end
-  'fedora-26'.tap do |box|
-    config.vm.define box, define_opts do |config|
-      config.vm.box = 'elastic/fedora-26-x86_64'
-      dnf_common config, box
-    end
-  end
   'fedora-27'.tap do |box|
     config.vm.define box, define_opts do |config|
       config.vm.box = 'elastic/fedora-27-x86_64'
       dnf_common config, box
     end
   end
+  'fedora-28'.tap do |box|
+    config.vm.define box, define_opts do |config|
+      config.vm.box = 'elastic/fedora-28-x86_64'
+      dnf_common config, box
+    end
+  end
   'opensuse-42'.tap do |box|
     config.vm.define box, define_opts do |config|
       config.vm.box = 'elastic/opensuse-42-x86_64'
build.gradle (10 changed lines)
@@ -36,6 +36,16 @@ import java.nio.file.Files
 import java.nio.file.Path
 import java.security.MessageDigest

+plugins {
+  id 'com.gradle.build-scan' version '1.13.2'
+}
+if (properties.get("org.elasticsearch.acceptScanTOS", "false") == "true") {
+  buildScan {
+    termsOfServiceUrl = 'https://gradle.com/terms-of-service'
+    termsOfServiceAgree = 'yes'
+  }
+}
+
 // common maven publishing configuration
 subprojects {
   group = 'org.elasticsearch'
@@ -23,8 +23,8 @@ class VagrantTestPlugin implements Plugin<Project> {
         'centos-7',
         'debian-8',
         'debian-9',
-        'fedora-26',
         'fedora-27',
+        'fedora-28',
         'oel-6',
         'oel-7',
         'opensuse-42',
@@ -1,5 +1,5 @@
 elasticsearch = 7.0.0-alpha1
-lucene = 7.4.0-snapshot-59f2b7aec2
+lucene = 7.4.0-snapshot-cc2ee23050

 # optional dependencies
 spatial4j = 0.7
@@ -30,6 +30,7 @@ import org.apache.http.entity.ContentType;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
@@ -711,6 +712,16 @@ final class RequestConverters {
         return request;
     }

+    static Request deleteRepository(DeleteRepositoryRequest deleteRepositoryRequest) {
+        String endpoint = new EndpointBuilder().addPathPartAsIs("_snapshot").addPathPart(deleteRepositoryRequest.name()).build();
+        Request request = new Request(HttpDelete.METHOD_NAME, endpoint);
+
+        Params parameters = new Params(request);
+        parameters.withMasterTimeout(deleteRepositoryRequest.masterNodeTimeout());
+        parameters.withTimeout(deleteRepositoryRequest.timeout());
+        return request;
+    }
+
     static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) throws IOException {
        String endpoint = new EndpointBuilder().addPathPartAsIs("_template").addPathPart(putIndexTemplateRequest.name()).build();
        Request request = new Request(HttpPut.METHOD_NAME, endpoint);
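The new converter maps a `DeleteRepositoryRequest` onto the low-level `DELETE /_snapshot/{repository}` endpoint. A minimal test-style sketch of what it produces (repository name and timeout are illustrative; `RequestConverters` is package-private, so code like this would live in the same package):

```java
public void testDeleteRepositoryEndpointShape() {
    DeleteRepositoryRequest deleteRepositoryRequest = new DeleteRepositoryRequest("my_backup");
    deleteRepositoryRequest.masterNodeTimeout("30s");

    Request request = RequestConverters.deleteRepository(deleteRepositoryRequest);
    assertEquals("DELETE", request.getMethod());
    assertEquals("/_snapshot/my_backup", request.getEndpoint());
    // parameter key assumed from Params.withMasterTimeout above
    assertEquals("30s", request.getParameters().get("master_timeout"));
}
```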
@@ -26,8 +26,6 @@ import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
-import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
-import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.delete.DeleteRequest;
@@ -592,7 +590,7 @@ public class RestHighLevelClient implements Closeable {
             throw validationException;
         }
         Request req = requestConverter.apply(request);
-        req.setHeaders(headers);
+        addHeaders(req, headers);
         Response response;
         try {
             response = client.performRequest(req);
@@ -642,12 +640,19 @@ public class RestHighLevelClient implements Closeable {
             listener.onFailure(e);
             return;
         }
-        req.setHeaders(headers);
+        addHeaders(req, headers);

         ResponseListener responseListener = wrapResponseListener(responseConverter, listener, ignores);
         client.performRequestAsync(req, responseListener);
     }

+    private static void addHeaders(Request request, Header... headers) {
+        Objects.requireNonNull(headers, "headers cannot be null");
+        for (Header header : headers) {
+            request.addHeader(header.getName(), header.getValue());
+        }
+    }
+
     final <Resp> ResponseListener wrapResponseListener(CheckedFunction<Response, Resp, IOException> responseConverter,
                                                        ActionListener<Resp> actionListener, Set<Integer> ignores) {
         return new ResponseListener() {
@@ -21,6 +21,8 @@ package org.elasticsearch.client;

 import org.apache.http.Header;
 import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
 import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
@@ -90,4 +92,28 @@ public final class SnapshotClient {
         restHighLevelClient.performRequestAsyncAndParseEntity(putRepositoryRequest, RequestConverters::createRepository,
             PutRepositoryResponse::fromXContent, listener, emptySet(), headers);
     }
+
+    /**
+     * Deletes a snapshot repository.
+     * <p>
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
+     * API on elastic.co</a>
+     */
+    public DeleteRepositoryResponse deleteRepository(DeleteRepositoryRequest deleteRepositoryRequest, Header... headers)
+            throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository,
+            DeleteRepositoryResponse::fromXContent, emptySet(), headers);
+    }
+
+    /**
+     * Asynchronously deletes a snapshot repository.
+     * <p>
+     * See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshots.html"> Snapshot and Restore
+     * API on elastic.co</a>
+     */
+    public void deleteRepositoryAsync(DeleteRepositoryRequest deleteRepositoryRequest,
+                                      ActionListener<DeleteRepositoryResponse> listener, Header... headers) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(deleteRepositoryRequest, RequestConverters::deleteRepository,
+            DeleteRepositoryResponse::fromXContent, listener, emptySet(), headers);
+    }
 }
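Together with the converter above, the high-level client now exposes repository deletion directly. A hedged usage sketch (the `client` instance, `listener`, and repository name are assumptions here; the calls mirror the documentation test further down):

```java
DeleteRepositoryRequest request = new DeleteRepositoryRequest("my_backup");
request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // optional
request.timeout("1m");                                    // optional

// synchronous
DeleteRepositoryResponse response = client.snapshot().deleteRepository(request);
boolean acknowledged = response.isAcknowledged();

// asynchronous, given an ActionListener<DeleteRepositoryResponse> named listener
client.snapshot().deleteRepositoryAsync(request, listener);
```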
@@ -73,12 +73,12 @@ public class CustomRestHighLevelClientTests extends ESTestCase {
         final RestClient restClient = mock(RestClient.class);
         restHighLevelClient = new CustomRestClient(restClient);

-        doAnswer(inv -> mockPerformRequest(((Request) inv.getArguments()[0]).getHeaders()[0]))
+        doAnswer(inv -> mockPerformRequest(((Request) inv.getArguments()[0]).getHeaders().iterator().next()))
                 .when(restClient)
                 .performRequest(any(Request.class));

         doAnswer(inv -> mockPerformRequestAsync(
-                ((Request) inv.getArguments()[0]).getHeaders()[0],
+                ((Request) inv.getArguments()[0]).getHeaders().iterator().next(),
                 (ResponseListener) inv.getArguments()[1]))
                 .when(restClient)
                 .performRequestAsync(any(Request.class), any(ResponseListener.class));
@@ -30,6 +30,7 @@ import org.apache.http.util.EntityUtils;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
@@ -1546,7 +1547,7 @@ public class RequestConvertersTests extends ESTestCase {
     }

     public void testCreateRepository() throws IOException {
-        String repository = "repo";
+        String repository = randomIndicesNames(1, 1)[0];
         String endpoint = "/_snapshot/" + repository;
         Path repositoryLocation = PathUtils.get(".");
         PutRepositoryRequest putRepositoryRequest = new PutRepositoryRequest(repository);
@@ -1555,10 +1556,10 @@ public class RequestConvertersTests extends ESTestCase {

         putRepositoryRequest.settings(
             Settings.builder()
-            .put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation)
-            .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean())
-            .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
-            .build());
+                .put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation)
+                .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean())
+                .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
+                .build());

         Request request = RequestConverters.createRepository(putRepositoryRequest);
         assertThat(endpoint, equalTo(request.getEndpoint()));
@@ -1566,6 +1567,24 @@ public class RequestConvertersTests extends ESTestCase {
         assertToXContentBody(putRepositoryRequest, request.getEntity());
     }

+    public void testDeleteRepository() {
+        Map<String, String> expectedParams = new HashMap<>();
+        String repository = randomIndicesNames(1, 1)[0];
+
+        StringBuilder endpoint = new StringBuilder("/_snapshot/" + repository);
+
+        DeleteRepositoryRequest deleteRepositoryRequest = new DeleteRepositoryRequest();
+        deleteRepositoryRequest.name(repository);
+        setRandomMasterTimeout(deleteRepositoryRequest, expectedParams);
+        setRandomTimeout(deleteRepositoryRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
+
+        Request request = RequestConverters.deleteRepository(deleteRepositoryRequest);
+        assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
+        assertThat(HttpDelete.METHOD_NAME, equalTo(request.getMethod()));
+        assertThat(expectedParams, equalTo(request.getParameters()));
+        assertNull(request.getEntity());
+    }
+
     public void testPutTemplateRequest() throws Exception {
         Map<String, String> names = new HashMap<>();
         names.put("log", "log");
@@ -19,7 +19,11 @@

 package org.elasticsearch.client;

+import org.apache.http.entity.ContentType;
+import org.apache.http.entity.StringEntity;
 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
 import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
@@ -29,6 +33,7 @@ import org.elasticsearch.repositories.fs.FsRepository;
 import org.elasticsearch.rest.RestStatus;

 import java.io.IOException;
+import java.util.Collections;

 import static org.hamcrest.Matchers.equalTo;

@@ -40,7 +45,6 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {
         request.type(type);
         return execute(request, highLevelClient().snapshot()::createRepository,
             highLevelClient().snapshot()::createRepositoryAsync);
-
     }

     public void testCreateRepository() throws IOException {
@@ -48,7 +52,7 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {
         assertTrue(response.isAcknowledged());
     }

-    public void testModulesGetRepositoriesUsingParams() throws IOException {
+    public void testSnapshotGetRepositoriesUsingParams() throws IOException {
         String testRepository = "test";
         assertTrue(createTestRepository(testRepository, FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged());
         assertTrue(createTestRepository("other", FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged());
@@ -60,7 +64,7 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {
         assertThat(1, equalTo(response.repositories().size()));
     }

-    public void testModulesGetDefaultRepositories() throws IOException {
+    public void testSnapshotGetDefaultRepositories() throws IOException {
         assertTrue(createTestRepository("other", FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged());
         assertTrue(createTestRepository("test", FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged());

@@ -69,7 +73,7 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {
         assertThat(2, equalTo(response.repositories().size()));
     }

-    public void testModulesGetRepositoriesNonExistent() throws IOException {
+    public void testSnapshotGetRepositoriesNonExistent() {
         String repository = "doesnotexist";
         GetRepositoriesRequest request = new GetRepositoriesRequest(new String[]{repository});
         ElasticsearchException exception = expectThrows(ElasticsearchException.class, () -> execute(request,
@@ -79,4 +83,23 @@ public class SnapshotIT extends ESRestHighLevelClientTestCase {
         assertThat(exception.getMessage(), equalTo(
             "Elasticsearch exception [type=repository_missing_exception, reason=[" + repository + "] missing]"));
     }
+
+    public void testSnapshotDeleteRepository() throws IOException {
+        String repository = "test";
+        String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}";
+
+        highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + repository,
+            Collections.emptyMap(), new StringEntity(repositorySettings, ContentType.APPLICATION_JSON));
+
+        GetRepositoriesRequest request = new GetRepositoriesRequest();
+        GetRepositoriesResponse response = execute(request, highLevelClient().snapshot()::getRepositories,
+            highLevelClient().snapshot()::getRepositoriesAsync);
+        assertThat(1, equalTo(response.repositories().size()));
+
+        DeleteRepositoryRequest deleteRequest = new DeleteRepositoryRequest(repository);
+        DeleteRepositoryResponse deleteResponse = execute(deleteRequest, highLevelClient().snapshot()::deleteRepository,
+            highLevelClient().snapshot()::deleteRepositoryAsync);
+
+        assertTrue(deleteResponse.isAcknowledged());
+    }
 }
@@ -21,6 +21,8 @@ package org.elasticsearch.client.documentation;

 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.LatchedActionListener;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.delete.DeleteRepositoryResponse;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse;
 import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
@@ -235,6 +237,66 @@ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase
         }
     }

+    public void testSnapshotDeleteRepository() throws IOException {
+        RestHighLevelClient client = highLevelClient();
+
+        createTestRepositories();
+
+        // tag::delete-repository-request
+        DeleteRepositoryRequest request = new DeleteRepositoryRequest(repositoryName);
+        // end::delete-repository-request
+
+        // tag::delete-repository-request-masterTimeout
+        request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1>
+        request.masterNodeTimeout("1m"); // <2>
+        // end::delete-repository-request-masterTimeout
+        // tag::delete-repository-request-timeout
+        request.timeout(TimeValue.timeValueMinutes(1)); // <1>
+        request.timeout("1m"); // <2>
+        // end::delete-repository-request-timeout
+
+        // tag::delete-repository-execute
+        DeleteRepositoryResponse response = client.snapshot().deleteRepository(request);
+        // end::delete-repository-execute
+
+        // tag::delete-repository-response
+        boolean acknowledged = response.isAcknowledged(); // <1>
+        // end::delete-repository-response
+        assertTrue(acknowledged);
+    }
+
+    public void testSnapshotDeleteRepositoryAsync() throws InterruptedException {
+        RestHighLevelClient client = highLevelClient();
+        {
+            DeleteRepositoryRequest request = new DeleteRepositoryRequest();
+
+            // tag::delete-repository-execute-listener
+            ActionListener<DeleteRepositoryResponse> listener =
+                new ActionListener<DeleteRepositoryResponse>() {
+                    @Override
+                    public void onResponse(DeleteRepositoryResponse deleteRepositoryResponse) {
+                        // <1>
+                    }
+
+                    @Override
+                    public void onFailure(Exception e) {
+                        // <2>
+                    }
+                };
+            // end::delete-repository-execute-listener
+
+            // Replace the empty listener by a blocking listener in test
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::delete-repository-execute-async
+            client.snapshot().deleteRepositoryAsync(request, listener); // <1>
+            // end::delete-repository-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
+
     private void createTestRepositories() throws IOException {
         PutRepositoryRequest request = new PutRepositoryRequest(repositoryName);
         request.type(FsRepository.TYPE);
@@ -19,14 +19,17 @@

 package org.elasticsearch.client;

-import org.apache.http.entity.ContentType;
 import org.apache.http.Header;
 import org.apache.http.HttpEntity;
+import org.apache.http.entity.ContentType;
+import org.apache.http.message.BasicHeader;
 import org.apache.http.nio.entity.NStringEntity;
 import org.apache.http.nio.protocol.HttpAsyncResponseConsumer;

-import java.util.Arrays;
+import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Objects;

@@ -36,13 +39,12 @@ import static java.util.Collections.unmodifiableMap;
  * HTTP Request to Elasticsearch.
  */
 public final class Request {
-    private static final Header[] NO_HEADERS = new Header[0];
     private final String method;
     private final String endpoint;
     private final Map<String, String> parameters = new HashMap<>();
+    private final List<Header> headers = new ArrayList<>();

     private HttpEntity entity;
-    private Header[] headers = NO_HEADERS;
     private HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory =
             HttpAsyncResponseConsumerFactory.DEFAULT;

@@ -125,21 +127,19 @@ public final class Request {
     }

     /**
-     * Set the headers to attach to the request.
+     * Add the provided header to the request.
      */
-    public void setHeaders(Header... headers) {
-        Objects.requireNonNull(headers, "headers cannot be null");
-        for (Header header : headers) {
-            Objects.requireNonNull(header, "header cannot be null");
-        }
-        this.headers = headers;
+    public void addHeader(String name, String value) {
+        Objects.requireNonNull(name, "header name cannot be null");
+        Objects.requireNonNull(value, "header value cannot be null");
+        this.headers.add(new ReqHeader(name, value));
     }

     /**
      * Headers to attach to the request.
      */
-    public Header[] getHeaders() {
-        return headers;
+    List<Header> getHeaders() {
+        return Collections.unmodifiableList(headers);
     }

     /**
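For callers the migration is mechanical: one `addHeader` call per header instead of a single varargs `setHeaders` call. Note that `getHeaders()` also loses its `public` modifier and now returns an unmodifiable `List<Header>`. A sketch (header values are illustrative):

```java
Request request = new Request("GET", "/_cat/indices");

// before (removed by this change):
// request.setHeaders(new BasicHeader("Accept", "text/plain"),
//                    new BasicHeader("Cache-Control", "no-cache"));

// after:
request.addHeader("Accept", "text/plain");
request.addHeader("Cache-Control", "no-cache");
```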
@@ -175,13 +175,13 @@ public final class Request {
         if (entity != null) {
             b.append(", entity=").append(entity);
         }
-        if (headers.length > 0) {
+        if (headers.size() > 0) {
             b.append(", headers=");
-            for (int h = 0; h < headers.length; h++) {
+            for (int h = 0; h < headers.size(); h++) {
                 if (h != 0) {
                     b.append(',');
                 }
-                b.append(headers[h].toString());
+                b.append(headers.get(h).toString());
             }
         }
         if (httpAsyncResponseConsumerFactory != HttpAsyncResponseConsumerFactory.DEFAULT) {
@@ -204,12 +204,40 @@ public final class Request {
                 && endpoint.equals(other.endpoint)
                 && parameters.equals(other.parameters)
                 && Objects.equals(entity, other.entity)
-                && Arrays.equals(headers, other.headers)
+                && headers.equals(other.headers)
                 && httpAsyncResponseConsumerFactory.equals(other.httpAsyncResponseConsumerFactory);
     }

     @Override
     public int hashCode() {
-        return Objects.hash(method, endpoint, parameters, entity, Arrays.hashCode(headers), httpAsyncResponseConsumerFactory);
+        return Objects.hash(method, endpoint, parameters, entity, headers.hashCode(), httpAsyncResponseConsumerFactory);
     }
+
+    /**
+     * Custom implementation of {@link BasicHeader} that overrides equals and hashCode.
+     */
+    static final class ReqHeader extends BasicHeader {
+
+        ReqHeader(String name, String value) {
+            super(name, value);
+        }
+
+        @Override
+        public boolean equals(Object other) {
+            if (this == other) {
+                return true;
+            }
+            if (other instanceof ReqHeader) {
+                Header otherHeader = (Header) other;
+                return Objects.equals(getName(), otherHeader.getName()) &&
+                        Objects.equals(getValue(), otherHeader.getValue());
+            }
+            return false;
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(getName(), getValue());
+        }
+    }
 }
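`ReqHeader` exists because `BasicHeader` does not override `equals`/`hashCode`: once headers live in a `List`, the `Request.equals` above would otherwise fall back to identity comparison of the elements. A minimal sketch of the behavior this buys:

```java
Request a = new Request("GET", "/");
Request b = new Request("GET", "/");
a.addHeader("Accept", "text/plain");
b.addHeader("Accept", "text/plain");

// Headers are stored as ReqHeader, which compares by name and value,
// so two independently built but identical requests compare equal.
assert a.equals(b);
assert a.hashCode() == b.hashCode();
```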
@@ -215,7 +215,7 @@ public class RestClient implements Closeable {
     @Deprecated
     public Response performRequest(String method, String endpoint, Header... headers) throws IOException {
         Request request = new Request(method, endpoint);
-        request.setHeaders(headers);
+        addHeaders(request, headers);
         return performRequest(request);
     }

@@ -237,7 +237,7 @@ public class RestClient implements Closeable {
     public Response performRequest(String method, String endpoint, Map<String, String> params, Header... headers) throws IOException {
         Request request = new Request(method, endpoint);
         addParameters(request, params);
-        request.setHeaders(headers);
+        addHeaders(request, headers);
         return performRequest(request);
     }

@@ -264,7 +264,7 @@ public class RestClient implements Closeable {
         Request request = new Request(method, endpoint);
         addParameters(request, params);
         request.setEntity(entity);
-        request.setHeaders(headers);
+        addHeaders(request, headers);
         return performRequest(request);
     }

@@ -305,7 +305,7 @@ public class RestClient implements Closeable {
         addParameters(request, params);
         request.setEntity(entity);
         request.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory);
-        request.setHeaders(headers);
+        addHeaders(request, headers);
         return performRequest(request);
     }

@@ -325,7 +325,7 @@ public class RestClient implements Closeable {
         Request request;
         try {
             request = new Request(method, endpoint);
-            request.setHeaders(headers);
+            addHeaders(request, headers);
         } catch (Exception e) {
             responseListener.onFailure(e);
             return;
@@ -352,7 +352,7 @@ public class RestClient implements Closeable {
         try {
             request = new Request(method, endpoint);
             addParameters(request, params);
-            request.setHeaders(headers);
+            addHeaders(request, headers);
         } catch (Exception e) {
             responseListener.onFailure(e);
             return;
@@ -383,7 +383,7 @@ public class RestClient implements Closeable {
             request = new Request(method, endpoint);
             addParameters(request, params);
             request.setEntity(entity);
-            request.setHeaders(headers);
+            addHeaders(request, headers);
         } catch (Exception e) {
             responseListener.onFailure(e);
             return;
@@ -420,7 +420,7 @@ public class RestClient implements Closeable {
             addParameters(request, params);
             request.setEntity(entity);
             request.setHttpAsyncResponseConsumerFactory(httpAsyncResponseConsumerFactory);
-            request.setHeaders(headers);
+            addHeaders(request, headers);
         } catch (Exception e) {
             responseListener.onFailure(e);
             return;
@@ -539,9 +539,9 @@ public class RestClient implements Closeable {
         });
     }

-    private void setHeaders(HttpRequest httpRequest, Header[] requestHeaders) {
+    private void setHeaders(HttpRequest httpRequest, Collection<Header> requestHeaders) {
         // request headers override default headers, so we don't add default headers if they exist as request headers
-        final Set<String> requestNames = new HashSet<>(requestHeaders.length);
+        final Set<String> requestNames = new HashSet<>(requestHeaders.size());
         for (Header requestHeader : requestHeaders) {
             httpRequest.addHeader(requestHeader);
             requestNames.add(requestHeader.getName());
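The signature change here is internal; the precedence rule in the comment is unchanged: a request-level header suppresses a default header of the same name. For illustration (host, header names, and values are invented; `setDefaultHeaders` is the existing builder API):

```java
RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200))
        .setDefaultHeaders(new Header[] { new BasicHeader("X-App", "default-value") })
        .build();

Request request = new Request("GET", "/");
request.addHeader("X-App", "per-request-value");
// "X-App" is sent once, with the per-request value; default headers whose
// names do not appear on the request are still added.
```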
@@ -877,10 +877,24 @@ public class RestClient implements Closeable {
         }
     }

+    /**
+     * Add all headers from the provided varargs argument to a {@link Request}. This only exists
+     * to support methods that exist for backwards compatibility.
+     */
+    @Deprecated
+    private static void addHeaders(Request request, Header... headers) {
+        Objects.requireNonNull(headers, "headers cannot be null");
+        for (Header header : headers) {
+            Objects.requireNonNull(header, "header cannot be null");
+            request.addHeader(header.getName(), header.getValue());
+        }
+    }
+
     /**
      * Add all parameters from a map to a {@link Request}. This only exists
      * to support methods that exist for backwards compatibility.
      */
     @Deprecated
     private static void addParameters(Request request, Map<String, String> parameters) {
         Objects.requireNonNull(parameters, "parameters cannot be null");
         for (Map.Entry<String, String> entry : parameters.entrySet()) {
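The deprecated `performRequest(String, String, Header...)` overloads now funnel through this helper. Migrating off them looks like this (a sketch; `restClient` is assumed to exist):

```java
// deprecated style:
Response r1 = restClient.performRequest("GET", "/", new BasicHeader("Accept", "text/plain"));

// preferred style:
Request request = new Request("GET", "/");
request.addHeader("Accept", "text/plain");
Response r2 = restClient.performRequest(request);
```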
@@ -19,21 +19,21 @@

 package org.elasticsearch.client;

-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
 import org.apache.http.Header;
 import org.apache.http.HttpEntity;
 import org.apache.http.entity.ByteArrayEntity;
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
 import org.apache.http.message.BasicHeader;
 import org.apache.http.nio.entity.NStringEntity;
 import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory;

-import static org.junit.Assert.assertArrayEquals;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNull;
@@ -127,31 +127,33 @@ public class RequestTests extends RestClientTestCase {
         assertEquals(json, new String(os.toByteArray(), ContentType.APPLICATION_JSON.getCharset()));
     }

-    public void testSetHeaders() {
+    public void testAddHeader() {
         final String method = randomFrom(new String[] {"GET", "PUT", "POST", "HEAD", "DELETE"});
         final String endpoint = randomAsciiLettersOfLengthBetween(1, 10);
         Request request = new Request(method, endpoint);

         try {
-            request.setHeaders((Header[]) null);
+            request.addHeader(null, randomAsciiLettersOfLengthBetween(3, 10));
             fail("expected failure");
         } catch (NullPointerException e) {
-            assertEquals("headers cannot be null", e.getMessage());
+            assertEquals("header name cannot be null", e.getMessage());
         }

         try {
-            request.setHeaders(new Header [] {null});
+            request.addHeader(randomAsciiLettersOfLengthBetween(3, 10), null);
             fail("expected failure");
         } catch (NullPointerException e) {
-            assertEquals("header cannot be null", e.getMessage());
+            assertEquals("header value cannot be null", e.getMessage());
         }

-        Header[] headers = new Header[between(0, 5)];
-        for (int i = 0; i < headers.length; i++) {
-            headers[i] = new BasicHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3));
+        int numHeaders = between(0, 5);
+        List<Header> headers = new ArrayList<>();
+        for (int i = 0; i < numHeaders; i++) {
+            Header header = new Request.ReqHeader(randomAsciiAlphanumOfLengthBetween(5, 10), randomAsciiAlphanumOfLength(3));
+            headers.add(header);
+            request.addHeader(header.getName(), header.getValue());
         }
-        request.setHeaders(headers);
-        assertArrayEquals(headers, request.getHeaders());
+        assertEquals(headers, new ArrayList<>(request.getHeaders()));
     }

     public void testEqualsAndHashCode() {
@@ -168,7 +170,7 @@ public class RequestTests extends RestClientTestCase {
         assertNotEquals(mutant, request);
     }

-    private Request randomRequest() {
+    private static Request randomRequest() {
         Request request = new Request(
                 randomFrom(new String[] {"GET", "PUT", "DELETE", "POST", "HEAD", "OPTIONS"}),
                 randomAsciiAlphanumOfLength(5));
@@ -192,11 +194,9 @@ public class RequestTests extends RestClientTestCase {

         if (randomBoolean()) {
             int headerCount = between(1, 5);
-            Header[] headers = new Header[headerCount];
             for (int i = 0; i < headerCount; i++) {
-                headers[i] = new BasicHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3));
+                request.addHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3));
             }
-            request.setHeaders(headers);
         }

         if (randomBoolean()) {
@@ -206,13 +206,13 @@ public class RequestTests extends RestClientTestCase {
         return request;
     }

-    private Request copy(Request request) {
+    private static Request copy(Request request) {
         Request copy = new Request(request.getMethod(), request.getEndpoint());
         copyMutables(request, copy);
         return copy;
     }

-    private Request mutate(Request request) {
+    private static Request mutate(Request request) {
         if (randomBoolean()) {
             // Mutate request or method but keep everything else constant
             Request mutant = randomBoolean()
@@ -231,11 +231,7 @@ public class RequestTests extends RestClientTestCase {
             mutant.setJsonEntity("mutant"); // randomRequest can't produce this value
             return mutant;
         case 2:
-            if (mutant.getHeaders().length > 0) {
-                mutant.setHeaders(new Header[0]);
-            } else {
-                mutant.setHeaders(new BasicHeader("extra", "m"));
-            }
+            mutant.addHeader("extra", "m");
             return mutant;
         case 3:
             mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5));
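The simplified mutation works because `addHeader` appends rather than replaces, so adding an `"extra"` header always produces an unequal request. A sketch of the append semantics:

```java
Request request = new Request("GET", "/");
request.addHeader("Accept", "text/plain");
request.addHeader("Accept", "application/json");
// getHeaders() now holds both entries; with the removed setHeaders(...)
// a second call would have replaced the whole header array instead.
```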
@@ -245,12 +241,14 @@ public class RequestTests extends RestClientTestCase {
         }
     }

-    private void copyMutables(Request from, Request to) {
+    private static void copyMutables(Request from, Request to) {
         for (Map.Entry<String, String> param : from.getParameters().entrySet()) {
             to.addParameter(param.getKey(), param.getValue());
         }
         to.setEntity(from.getEntity());
-        to.setHeaders(from.getHeaders());
+        for (Header header : from.getHeaders()) {
+            to.addHeader(header.getName(), header.getValue());
+        }
         to.setHttpAsyncResponseConsumerFactory(from.getHttpAsyncResponseConsumerFactory());
     }
 }
@@ -29,7 +29,6 @@ import org.apache.http.HttpHost;
 import org.apache.http.auth.AuthScope;
 import org.apache.http.auth.UsernamePasswordCredentials;
-import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
 import org.apache.http.impl.client.BasicCredentialsProvider;
 import org.apache.http.impl.client.TargetAuthenticationStrategy;
 import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
@@ -379,7 +378,9 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase {
         String requestBody = "{ \"field\": \"value\" }";
         Request request = new Request(method, "/" + statusCode);
         request.setJsonEntity(requestBody);
-        request.setHeaders(headers);
+        for (Header header : headers) {
+            request.addHeader(header.getName(), header.getValue());
+        }
         Response esResponse;
         try {
             esResponse = restClient.performRequest(request);
@@ -312,7 +312,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
     }

     /**
-     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetHeaders()}.
+     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddHeaders()}.
      */
     @Deprecated
     public void tesPerformRequestOldStyleNullHeaders() throws IOException {
@@ -333,7 +333,7 @@ public class RestClientSingleHostTests extends RestClientTestCase {
     }

     /**
-     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetParameters()}.
+     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}.
      */
     @Deprecated
     public void testPerformRequestOldStyleWithNullParams() throws IOException {
@@ -362,7 +362,9 @@ public class RestClientSingleHostTests extends RestClientTestCase {
         final Header[] requestHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header");
         final int statusCode = randomStatusCode(getRandom());
         Request request = new Request(method, "/" + statusCode);
-        request.setHeaders(requestHeaders);
+        for (Header requestHeader : requestHeaders) {
+            request.addHeader(requestHeader.getName(), requestHeader.getValue());
+        }
         Response esResponse;
         try {
             esResponse = restClient.performRequest(request);
@@ -436,9 +438,9 @@ public class RestClientSingleHostTests extends RestClientTestCase {
         final Set<String> uniqueNames = new HashSet<>();
         if (randomBoolean()) {
             Header[] headers = RestClientTestUtil.randomHeaders(getRandom(), "Header");
-            request.setHeaders(headers);
             for (Header header : headers) {
-                expectedRequest.addHeader(header);
+                request.addHeader(header.getName(), header.getValue());
+                expectedRequest.addHeader(new Request.ReqHeader(header.getName(), header.getValue()));
                 uniqueNames.add(header.getName());
             }
         }
@@ -27,11 +27,13 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.Collections;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;

 import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
+import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.times;
@@ -57,17 +59,20 @@ public class RestClientTests extends RestClientTestCase {
             restClient.performRequestAsync(new Request("unsupported", randomAsciiLettersOfLength(5)), new ResponseListener() {
                 @Override
                 public void onSuccess(Response response) {
-                    fail("should have failed because of unsupported method");
+                    throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client");
                 }

                 @Override
                 public void onFailure(Exception exception) {
-                    assertThat(exception, instanceOf(UnsupportedOperationException.class));
-                    assertEquals("http method not supported: unsupported", exception.getMessage());
-                    latch.countDown();
+                    try {
+                        assertThat(exception, instanceOf(UnsupportedOperationException.class));
+                        assertEquals("http method not supported: unsupported", exception.getMessage());
+                    } finally {
+                        latch.countDown();
+                    }
                 }
             });
-            latch.await();
+            assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS));
         }
     }

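The same hardening recurs in every async test below ("[TEST] remove endless wait in RestClientTests"): assertions that used to run bare in the callback are wrapped so the latch is always counted down, and the unbounded `latch.await()` becomes a bounded wait. The pattern, distilled (a sketch):

```java
final CountDownLatch latch = new CountDownLatch(1);
restClient.performRequestAsync(request, new ResponseListener() {
    @Override
    public void onSuccess(Response response) {
        // a mocked http client can never get here
        throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client");
    }

    @Override
    public void onFailure(Exception exception) {
        try {
            // assertions on the expected failure go here; if one throws,
            // the finally block still releases the waiting test thread
        } finally {
            latch.countDown();
        }
    }
});
// bounded wait: a missed countDown() now fails the test instead of hanging the suite
assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS));
```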
@@ -81,17 +86,20 @@ public class RestClientTests extends RestClientTestCase {
             restClient.performRequestAsync("unsupported", randomAsciiLettersOfLength(5), new ResponseListener() {
                 @Override
                 public void onSuccess(Response response) {
-                    fail("should have failed because of unsupported method");
+                    throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client");
                 }

                 @Override
                 public void onFailure(Exception exception) {
-                    assertThat(exception, instanceOf(UnsupportedOperationException.class));
-                    assertEquals("http method not supported: unsupported", exception.getMessage());
-                    latch.countDown();
+                    try {
+                        assertThat(exception, instanceOf(UnsupportedOperationException.class));
+                        assertEquals("http method not supported: unsupported", exception.getMessage());
+                    } finally {
+                        latch.countDown();
+                    }
                 }
             });
-            latch.await();
+            assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS));
         }
     }

@@ -105,22 +113,25 @@ public class RestClientTests extends RestClientTestCase {
             restClient.performRequestAsync(randomAsciiLettersOfLength(5), randomAsciiLettersOfLength(5), null, new ResponseListener() {
                 @Override
                 public void onSuccess(Response response) {
-                    fail("should have failed because of null parameters");
+                    throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client");
                 }

                 @Override
                 public void onFailure(Exception exception) {
-                    assertThat(exception, instanceOf(NullPointerException.class));
-                    assertEquals("parameters cannot be null", exception.getMessage());
-                    latch.countDown();
+                    try {
+                        assertThat(exception, instanceOf(NullPointerException.class));
+                        assertEquals("parameters cannot be null", exception.getMessage());
+                    } finally {
+                        latch.countDown();
+                    }
                 }
             });
-            latch.await();
+            assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS));
         }
     }

     /**
-     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetHeaders()}.
+     * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddHeader()}.
      */
     @Deprecated
     public void testPerformOldStyleAsyncWithNullHeaders() throws Exception {
@@ -129,18 +140,21 @@ public class RestClientTests extends RestClientTestCase {
             ResponseListener listener = new ResponseListener() {
                 @Override
                 public void onSuccess(Response response) {
-                    fail("should have failed because of null headers");
+                    throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client");
                 }

                 @Override
                 public void onFailure(Exception exception) {
-                    assertThat(exception, instanceOf(NullPointerException.class));
-                    assertEquals("header cannot be null", exception.getMessage());
-                    latch.countDown();
+                    try {
+                        assertThat(exception, instanceOf(NullPointerException.class));
+                        assertEquals("header cannot be null", exception.getMessage());
+                    } finally {
+                        latch.countDown();
+                    }
                 }
             };
             restClient.performRequestAsync("GET", randomAsciiLettersOfLength(5), listener, (Header) null);
-            latch.await();
+            assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS));
         }
     }

@@ -150,17 +164,20 @@ public class RestClientTests extends RestClientTestCase {
             restClient.performRequestAsync(new Request("GET", "::http:///"), new ResponseListener() {
                 @Override
                 public void onSuccess(Response response) {
-                    fail("should have failed because of wrong endpoint");
+                    throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client");
                 }

                 @Override
                 public void onFailure(Exception exception) {
-                    assertThat(exception, instanceOf(IllegalArgumentException.class));
-                    assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage());
-                    latch.countDown();
+                    try {
+                        assertThat(exception, instanceOf(IllegalArgumentException.class));
+                        assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage());
+                    } finally {
+                        latch.countDown();
+                    }
                 }
             });
-            latch.await();
+            assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS));
         }
     }

@@ -174,17 +191,20 @@ public class RestClientTests extends RestClientTestCase {
             restClient.performRequestAsync("GET", "::http:///", new ResponseListener() {
                 @Override
                 public void onSuccess(Response response) {
-                    fail("should have failed because of wrong endpoint");
+                    throw new UnsupportedOperationException("onSuccess cannot be called when using a mocked http client");
                }

                 @Override
                 public void onFailure(Exception exception) {
-                    assertThat(exception, instanceOf(IllegalArgumentException.class));
-                    assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage());
-                    latch.countDown();
+                    try {
+                        assertThat(exception, instanceOf(IllegalArgumentException.class));
+                        assertEquals("Expected scheme name at index 0: ::http:///", exception.getMessage());
+                    } finally {
+                        latch.countDown();
+                    }
                 }
             });
-            latch.await();
+            assertTrue("time out waiting for request to return", latch.await(1000, TimeUnit.MILLISECONDS));
         }
     }

@@ -27,9 +27,7 @@ import org.apache.http.auth.AuthScope;
 import org.apache.http.auth.UsernamePasswordCredentials;
 import org.apache.http.client.CredentialsProvider;
 import org.apache.http.client.config.RequestConfig;
-import org.apache.http.entity.BasicHttpEntity;
-import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
 import org.apache.http.impl.client.BasicCredentialsProvider;
 import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
 import org.apache.http.impl.nio.reactor.IOReactorConfig;
@@ -52,8 +50,6 @@ import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.security.KeyStore;
-import java.util.Collections;
-import java.util.Map;
 import java.util.concurrent.CountDownLatch;

 /**
@@ -176,9 +172,8 @@ public class RestClientDocumentation {
         request.setJsonEntity("{\"json\":\"text\"}");
         //end::rest-client-body-shorter
         //tag::rest-client-headers
-        request.setHeaders(
-            new BasicHeader("Accept", "text/plain"),
-            new BasicHeader("Cache-Control", "no-cache"));
+        request.addHeader("Accept", "text/plain");
+        request.addHeader("Cache-Control", "no-cache");
         //end::rest-client-headers
         //tag::rest-client-response-consumer
         request.setHttpAsyncResponseConsumerFactory(
@@ -100,7 +100,7 @@ final class RestClientTestUtil {
             if (random.nextBoolean()) {
                 headerName = headerName + i;
             }
-            headers[i] = new BasicHeader(headerName, RandomStrings.randomAsciiOfLengthBetween(random, 3, 10));
+            headers[i] = new BasicHeader(headerName, RandomStrings.randomAsciiLettersOfLengthBetween(random, 3, 10));
         }
         return headers;
     }
@@ -0,0 +1,22 @@
+call "%~dp0elasticsearch-env.bat" || exit /b 1
+
+if defined ES_ADDITIONAL_SOURCES (
+  for %%a in ("%ES_ADDITIONAL_SOURCES:;=","%") do (
+    call %~dp0%%a
+  )
+)
+
+for /f "tokens=1*" %%a in ("%*") do (
+  set main_class=%%a
+  set arguments=%%b
+)
+
+%JAVA% ^
+  %ES_JAVA_OPTS% ^
+  -Des.path.home="%ES_HOME%" ^
+  -Des.path.conf="%ES_PATH_CONF%" ^
+  -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^
+  -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^
+  -cp "%ES_CLASSPATH%" ^
+  %main_class% ^
+  %arguments%
@@ -3,17 +3,10 @@
 setlocal enabledelayedexpansion
 setlocal enableextensions

-call "%~dp0elasticsearch-env.bat" || exit /b 1
-
-%JAVA% ^
-  %ES_JAVA_OPTS% ^
-  -Des.path.home="%ES_HOME%" ^
-  -Des.path.conf="%ES_PATH_CONF%" ^
-  -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^
-  -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^
-  -cp "%ES_CLASSPATH%" ^
-  org.elasticsearch.common.settings.KeyStoreCli ^
-  %*
+call "%~dp0elasticsearch-cli.bat" ^
+  org.elasticsearch.common.settings.KeyStoreCli ^
+  %* ^
+  || exit /b 1

 endlocal
 endlocal
@@ -3,17 +3,10 @@
 setlocal enabledelayedexpansion
 setlocal enableextensions

-call "%~dp0elasticsearch-env.bat" || exit /b 1
-
-%JAVA% ^
-  %ES_JAVA_OPTS% ^
-  -Des.path.home="%ES_HOME%" ^
-  -Des.path.conf="%ES_PATH_CONF%" ^
-  -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^
-  -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^
-  -cp "%ES_CLASSPATH%" ^
+call "%~dp0elasticsearch-cli.bat" ^
   org.elasticsearch.plugins.PluginCli ^
-  %*
+  %* ^
+  || exit /b 1

 endlocal
 endlocal
@@ -3,17 +3,10 @@
 setlocal enabledelayedexpansion
 setlocal enableextensions

-call "%~dp0elasticsearch-env.bat" || exit /b 1
-
-%JAVA% ^
-  %ES_JAVA_OPTS% ^
-  -Des.path.home="%ES_HOME%" ^
-  -Des.path.conf="%ES_PATH_CONF%" ^
-  -Des.distribution.flavor="%ES_DISTRIBUTION_FLAVOR%" ^
-  -Des.distribution.type="%ES_DISTRIBUTION_TYPE%" ^
-  -cp "%ES_CLASSPATH%" ^
+call "%~dp0elasticsearch-cli.bat" ^
   org.elasticsearch.index.translog.TranslogToolCli ^
-  %*
+  %* ^
+  || exit /b 1

 endlocal
 endlocal
@@ -30,7 +30,6 @@ import org.elasticsearch.cli.EnvironmentAwareCommand;
 import org.elasticsearch.cli.ExitCodes;
 import org.elasticsearch.cli.Terminal;
 import org.elasticsearch.cli.UserException;
-import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.hash.MessageDigests;
@@ -240,7 +239,7 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
     /** Downloads the plugin and returns the file it was downloaded to. */
     private Path download(Terminal terminal, String pluginId, Path tmpDir) throws Exception {
         if (OFFICIAL_PLUGINS.contains(pluginId)) {
-            final String url = getElasticUrl(terminal, getStagingHash(), Version.CURRENT, pluginId, Platforms.PLATFORM_NAME);
+            final String url = getElasticUrl(terminal, getStagingHash(), Version.CURRENT, isSnapshot(), pluginId, Platforms.PLATFORM_NAME);
             terminal.println("-> Downloading " + pluginId + " from elastic");
             return downloadZipAndChecksum(terminal, url, tmpDir, false);
         }
@@ -272,22 +271,43 @@ class InstallPluginCommand extends EnvironmentAwareCommand {
         return System.getProperty(PROPERTY_STAGING_ID);
     }

+    boolean isSnapshot() {
+        return Build.CURRENT.isSnapshot();
+    }
+
     /** Returns the url for an official elasticsearch plugin. */
-    private String getElasticUrl(Terminal terminal, String stagingHash, Version version,
-                                 String pluginId, String platform) throws IOException {
+    private String getElasticUrl(
+            final Terminal terminal,
+            final String stagingHash,
+            final Version version,
+            final boolean isSnapshot,
+            final String pluginId,
+            final String platform) throws IOException, UserException {
         final String baseUrl;
-        if (stagingHash != null) {
-            baseUrl = String.format(Locale.ROOT,
-                "https://staging.elastic.co/%s-%s/downloads/elasticsearch-plugins/%s", version, stagingHash, pluginId);
-        } else {
-            baseUrl = String.format(Locale.ROOT,
-                "https://artifacts.elastic.co/downloads/elasticsearch-plugins/%s", pluginId);
+        if (isSnapshot && stagingHash == null) {
+            throw new UserException(
+                ExitCodes.CONFIG, "attempted to install release build of official plugin on snapshot build of Elasticsearch");
         }
-        final String platformUrl = String.format(Locale.ROOT, "%s/%s-%s-%s.zip", baseUrl, pluginId, platform, version);
+        if (stagingHash != null) {
+            if (isSnapshot) {
+                baseUrl = nonReleaseUrl("snapshots", version, stagingHash, pluginId);
+            } else {
+                baseUrl = nonReleaseUrl("staging", version, stagingHash, pluginId);
+            }
+        } else {
+            baseUrl = String.format(Locale.ROOT, "https://artifacts.elastic.co/downloads/elasticsearch-plugins/%s", pluginId);
+        }
+        final String platformUrl =
+            String.format(Locale.ROOT, "%s/%s-%s-%s.zip", baseUrl, pluginId, platform, Version.displayVersion(version, isSnapshot));
         if (urlExists(terminal, platformUrl)) {
             return platformUrl;
         }
-        return String.format(Locale.ROOT, "%s/%s-%s.zip", baseUrl, pluginId, version);
+        return String.format(Locale.ROOT, "%s/%s-%s.zip", baseUrl, pluginId, Version.displayVersion(version, isSnapshot));
     }

+    private String nonReleaseUrl(final String hostname, final Version version, final String stagingHash, final String pluginId) {
+        return String.format(
+            Locale.ROOT, "https://%s.elastic.co/%s-%s/downloads/elasticsearch-plugins/%s", hostname, version, stagingHash, pluginId);
+    }
+
     /** Returns the url for an elasticsearch plugin in maven. */
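The rewritten `getElasticUrl` yields three URL families, exercised by the tests below; the concrete shapes (version and staging hash values here are illustrative, inferred from those tests) are:

```java
// release build, no staging hash:
//   https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-7.0.0-alpha1.zip
//
// release build with staging hash "abc123":
//   https://staging.elastic.co/7.0.0-alpha1-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-7.0.0-alpha1.zip
//
// snapshot build with staging hash "abc123" (Version.displayVersion appends -SNAPSHOT):
//   https://snapshots.elastic.co/7.0.0-alpha1-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-7.0.0-alpha1-SNAPSHOT.zip
//
// snapshot build without a staging hash: UserException with ExitCodes.CONFIG.
```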
@@ -800,7 +800,7 @@ public class InstallPluginCommandTests extends ESTestCase {
         skipJarHellCommand.execute(terminal, pluginZip, isBatch, env.v2());
     }

-    void assertInstallPluginFromUrl(String pluginId, String name, String url, String stagingHash,
+    void assertInstallPluginFromUrl(String pluginId, String name, String url, String stagingHash, boolean isSnapshot,
                                     String shaExtension, Function<byte[], String> shaCalculator) throws Exception {
         Tuple<Path, Environment> env = createEnv(fs, temp);
         Path pluginDir = createPluginDir(temp);
@@ -834,6 +834,12 @@ public class InstallPluginCommandTests extends ESTestCase {
             String getStagingHash() {
                 return stagingHash;
             }

+            @Override
+            boolean isSnapshot() {
+                return isSnapshot;
+            }
+
             @Override
             void jarHellCheck(PluginInfo candidateInfo, Path candidate, Path pluginsDir, Path modulesDir) throws Exception {
                 // no jarhell check
@@ -843,48 +849,82 @@ public class InstallPluginCommandTests extends ESTestCase {
         assertPlugin(name, pluginDir, env.v2());
     }

-    public void assertInstallPluginFromUrl(String pluginId, String name, String url, String stagingHash) throws Exception {
+    public void assertInstallPluginFromUrl(
+            final String pluginId, final String name, final String url, final String stagingHash, boolean isSnapshot) throws Exception {
         MessageDigest digest = MessageDigest.getInstance("SHA-512");
-        assertInstallPluginFromUrl(pluginId, name, url, stagingHash, ".sha512", checksumAndFilename(digest, url));
+        assertInstallPluginFromUrl(pluginId, name, url, stagingHash, isSnapshot, ".sha512", checksumAndFilename(digest, url));
     }

     public void testOfficalPlugin() throws Exception {
         String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip";
-        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null);
+        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false);
     }

+    public void testOfficialPluginSnapshot() throws Exception {
+        String url = String.format(
+            Locale.ROOT,
+            "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s.zip",
+            Version.CURRENT,
+            Version.displayVersion(Version.CURRENT, true));
+        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", true);
+    }
+
+    public void testInstallReleaseBuildOfPluginOnSnapshotBuild() {
+        String url = String.format(
+            Locale.ROOT,
+            "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s.zip",
+            Version.CURRENT,
+            Version.displayVersion(Version.CURRENT, true));
+        // attemping to install a release build of a plugin (no staging ID) on a snapshot build should throw a user exception
+        final UserException e =
+            expectThrows(UserException.class, () -> assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, true));
+        assertThat(e.exitCode, equalTo(ExitCodes.CONFIG));
+        assertThat(
+            e, hasToString(containsString("attempted to install release build of official plugin on snapshot build of Elasticsearch")));
+    }
+
     public void testOfficalPluginStaging() throws Exception {
         String url = "https://staging.elastic.co/" + Version.CURRENT + "-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-"
             + Version.CURRENT + ".zip";
-        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123");
+        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", false);
     }

     public void testOfficalPlatformPlugin() throws Exception {
         String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Platforms.PLATFORM_NAME +
             "-" + Version.CURRENT + ".zip";
-        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null);
+        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false);
     }

+    public void testOfficialPlatformPluginSnapshot() throws Exception {
+        String url = String.format(
+            Locale.ROOT,
+            "https://snapshots.elastic.co/%s-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-%s-%s.zip",
+            Version.CURRENT,
+            Platforms.PLATFORM_NAME,
+            Version.displayVersion(Version.CURRENT, true));
+        assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", true);
+    }
+
     public void testOfficalPlatformPluginStaging() throws Exception {
         String url = "https://staging.elastic.co/" + Version.CURRENT + "-abc123/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-"
|
||||
+ Platforms.PLATFORM_NAME + "-"+ Version.CURRENT + ".zip";
|
||||
assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123");
|
||||
assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, "abc123", false);
|
||||
}
|
||||
|
||||
public void testMavenPlugin() throws Exception {
|
||||
String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip";
|
||||
assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null);
|
||||
assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false);
|
||||
}
|
||||
|
||||
public void testMavenPlatformPlugin() throws Exception {
|
||||
String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-" + Platforms.PLATFORM_NAME + "-1.0.0.zip";
|
||||
assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null);
|
||||
assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false);
|
||||
}
|
||||
|
||||
public void testMavenSha1Backcompat() throws Exception {
|
||||
String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip";
|
||||
MessageDigest digest = MessageDigest.getInstance("SHA-1");
|
||||
assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, ".sha1", checksum(digest));
|
||||
assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false, ".sha1", checksum(digest));
|
||||
assertTrue(terminal.getOutput(), terminal.getOutput().contains("sha512 not found, falling back to sha1"));
|
||||
}
|
||||
|
||||
|
@ -892,7 +932,7 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip";
|
||||
MessageDigest digest = MessageDigest.getInstance("SHA-1");
|
||||
UserException e = expectThrows(UserException.class, () ->
|
||||
assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, ".sha1", checksum(digest)));
|
||||
assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false, ".sha1", checksum(digest)));
|
||||
assertEquals(ExitCodes.IO_ERROR, e.exitCode);
|
||||
assertEquals("Plugin checksum missing: " + url + ".sha512", e.getMessage());
|
||||
}
|
||||
|
@ -900,7 +940,7 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
public void testMavenShaMissing() throws Exception {
|
||||
String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip";
|
||||
UserException e = expectThrows(UserException.class, () ->
|
||||
assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, ".dne", bytes -> null));
|
||||
assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false, ".dne", bytes -> null));
|
||||
assertEquals(ExitCodes.IO_ERROR, e.exitCode);
|
||||
assertEquals("Plugin checksum missing: " + url + ".sha1", e.getMessage());
|
||||
}
|
||||
|
@ -909,7 +949,7 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
String url = "https://artifacts.elastic.co/downloads/elasticsearch-plugins/analysis-icu/analysis-icu-" + Version.CURRENT + ".zip";
|
||||
MessageDigest digest = MessageDigest.getInstance("SHA-512");
|
||||
UserException e = expectThrows(UserException.class, () ->
|
||||
assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, ".sha512", checksum(digest)));
|
||||
assertInstallPluginFromUrl("analysis-icu", "analysis-icu", url, null, false, ".sha512", checksum(digest)));
|
||||
assertEquals(ExitCodes.IO_ERROR, e.exitCode);
|
||||
assertTrue(e.getMessage(), e.getMessage().startsWith("Invalid checksum file"));
|
||||
}
|
||||
|
@ -923,6 +963,7 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
"analysis-icu",
|
||||
url,
|
||||
null,
|
||||
false,
|
||||
".sha512",
|
||||
checksumAndString(digest, " repository-s3-" + Version.CURRENT + ".zip")));
|
||||
assertEquals(ExitCodes.IO_ERROR, e.exitCode);
|
||||
|
@ -938,6 +979,7 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
"analysis-icu",
|
||||
url,
|
||||
null,
|
||||
false,
|
||||
".sha512",
|
||||
checksumAndString(digest, " analysis-icu-" + Version.CURRENT + ".zip\nfoobar")));
|
||||
assertEquals(ExitCodes.IO_ERROR, e.exitCode);
|
||||
|
@ -952,6 +994,7 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
"analysis-icu",
|
||||
url,
|
||||
null,
|
||||
false,
|
||||
".sha512",
|
||||
bytes -> "foobar analysis-icu-" + Version.CURRENT + ".zip"));
|
||||
assertEquals(ExitCodes.IO_ERROR, e.exitCode);
|
||||
|
@ -961,7 +1004,7 @@ public class InstallPluginCommandTests extends ESTestCase {
|
|||
public void testSha1Mismatch() throws Exception {
|
||||
String url = "https://repo1.maven.org/maven2/mygroup/myplugin/1.0.0/myplugin-1.0.0.zip";
|
||||
UserException e = expectThrows(UserException.class, () ->
|
||||
assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, ".sha1", bytes -> "foobar"));
|
||||
assertInstallPluginFromUrl("mygroup:myplugin:1.0.0", "myplugin", url, null, false, ".sha1", bytes -> "foobar"));
|
||||
assertEquals(ExitCodes.IO_ERROR, e.exitCode);
|
||||
assertTrue(e.getMessage(), e.getMessage().contains("SHA-1 mismatch, expected foobar"));
|
||||
}
|
||||
|
|
|
@ -0,0 +1,82 @@
|
|||
[[java-rest-high-snapshot-delete-repository]]
|
||||
=== Snapshot Delete Repository API
|
||||
|
||||
The Snapshot Delete Repository API allows to delete a registered repository.
|
||||
|
||||
[[java-rest-high-snapshot-delete-repository-request]]
|
||||
==== Snapshot Delete Repository Request
|
||||
|
||||
A `DeleteRepositoryRequest`:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-request]
|
||||
--------------------------------------------------
|
||||
|
||||
==== Optional Arguments
|
||||
The following arguments can optionally be provided:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-timeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to wait for the all the nodes to acknowledge the settings were applied
|
||||
as a `TimeValue`
|
||||
<2> Timeout to wait for the all the nodes to acknowledge the settings were applied
|
||||
as a `String`
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-request-masterTimeout]
|
||||
--------------------------------------------------
|
||||
<1> Timeout to connect to the master node as a `TimeValue`
|
||||
<2> Timeout to connect to the master node as a `String`
|
||||
|
||||
[[java-rest-high-snapshot-delete-repository-sync]]
|
||||
==== Synchronous Execution
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-execute]
|
||||
--------------------------------------------------
|
||||
|
||||
[[java-rest-high-snapshot-delete-repository-async]]
|
||||
==== Asynchronous Execution
|
||||
|
||||
The asynchronous execution of a snapshot delete repository requires both the
|
||||
`DeleteRepositoryRequest` instance and an `ActionListener` instance to be
|
||||
passed to the asynchronous method:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-execute-async]
|
||||
--------------------------------------------------
|
||||
<1> The `DeleteRepositoryRequest` to execute and the `ActionListener`
|
||||
to use when the execution completes
|
||||
|
||||
The asynchronous method does not block and returns immediately. Once it is
|
||||
completed the `ActionListener` is called back using the `onResponse` method
|
||||
if the execution successfully completed or using the `onFailure` method if
|
||||
it failed.
|
||||
|
||||
A typical listener for `DeleteRepositoryResponse` looks like:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-execute-listener]
|
||||
--------------------------------------------------
|
||||
<1> Called when the execution is successfully completed. The response is
|
||||
provided as an argument
|
||||
<2> Called in case of a failure. The raised exception is provided as an argument
|
||||
|
||||
[[java-rest-high-cluster-delete-repository-response]]
|
||||
==== Snapshot Delete Repository Response
|
||||
|
||||
The returned `DeleteRepositoryResponse` allows to retrieve information about the
|
||||
executed operation as follows:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[delete-repository-response]
|
||||
--------------------------------------------------
|
||||
<1> Indicates the node has acknowledged the request
|
|
@ -114,6 +114,9 @@ include::cluster/list_tasks.asciidoc[]
|
|||
The Java High Level REST Client supports the following Snapshot APIs:
|
||||
|
||||
* <<java-rest-high-snapshot-get-repository>>
|
||||
* <<java-rest-high-snapshot-create-repository>>
|
||||
* <<java-rest-high-snapshot-delete-repository>>
|
||||
|
||||
include::snapshot/get_repository.asciidoc[]
|
||||
include::snapshot/create_repository.asciidoc[]
|
||||
include::snapshot/delete_repository.asciidoc[]
|
||||
|
|
|
@ -271,7 +271,7 @@ a `ContentType` of `application/json`.
|
|||
include-tagged::{doc-tests}/RestClientDocumentation.java[rest-client-body-shorter]
|
||||
--------------------------------------------------
|
||||
|
||||
And you can set a list of headers to send with the request:
|
||||
And you can add one or more headers to send with the request:
|
||||
|
||||
["source","java",subs="attributes,callouts,macros"]
|
||||
--------------------------------------------------
|
||||
|
|
|
@ -11,7 +11,7 @@ The Painless execute API allows an arbitrary script to be executed and a result
|
|||
|======
|
||||
| Name | Required | Default | Description
|
||||
| `script` | yes | - | The script to execute
|
||||
| `context` | no | `execute_api_script` | The context the script should be executed in.
|
||||
| `context` | no | `painless_test` | The context the script should be executed in.
|
||||
|======
|
||||
|
||||
==== Contexts
|
||||
|
|
|
@ -84,7 +84,7 @@ When `proxy.type` is set to `http` or `socks`, `proxy.host` and `proxy.port` mus
|
|||
|
||||
|
||||
[[repository-azure-repository-settings]]
|
||||
===== Repository settings
|
||||
==== Repository settings
|
||||
|
||||
The Azure repository supports following settings:
|
||||
|
||||
|
@ -178,7 +178,7 @@ client.admin().cluster().preparePutRepository("my_backup_java1")
|
|||
----
|
||||
|
||||
[[repository-azure-validation]]
|
||||
===== Repository validation rules
|
||||
==== Repository validation rules
|
||||
|
||||
According to the http://msdn.microsoft.com/en-us/library/dd135715.aspx[containers naming guide], a container name must
|
||||
be a valid DNS name, conforming to the following naming rules:
|
||||
|
|
|
@ -378,7 +378,8 @@ PUT /catalan_example
|
|||
"filter": {
|
||||
"catalan_elision": {
|
||||
"type": "elision",
|
||||
"articles": [ "d", "l", "m", "n", "s", "t"]
|
||||
"articles": [ "d", "l", "m", "n", "s", "t"],
|
||||
"articles_case": true
|
||||
},
|
||||
"catalan_stop": {
|
||||
"type": "stop",
|
||||
|
@ -1156,7 +1157,8 @@ PUT /italian_example
|
|||
"nell", "sull", "coll", "pell",
|
||||
"gl", "agl", "dagl", "degl", "negl",
|
||||
"sugl", "un", "m", "t", "s", "v", "d"
|
||||
]
|
||||
],
|
||||
"articles_case": true
|
||||
},
|
||||
"italian_stop": {
|
||||
"type": "stop",
|
||||
|
|
|
@ -103,6 +103,11 @@ The `simple_pattern` tokenizer uses a regular expression to capture matching
|
|||
text as terms. It uses a restricted subset of regular expression features
|
||||
and is generally faster than the `pattern` tokenizer.
|
||||
|
||||
<<analysis-chargroup-tokenizer,Char Group Tokenizer>>::
|
||||
|
||||
The `char_group` tokenizer is configurable through sets of characters to split
|
||||
on, which is usually less expensive than running regular expressions.
|
||||
|
||||
<<analysis-simplepatternsplit-tokenizer,Simple Pattern Split Tokenizer>>::
|
||||
|
||||
The `simple_pattern_split` tokenizer uses the same restricted regular expression
|
||||
|
@ -143,6 +148,8 @@ include::tokenizers/keyword-tokenizer.asciidoc[]
|
|||
|
||||
include::tokenizers/pattern-tokenizer.asciidoc[]
|
||||
|
||||
include::tokenizers/chargroup-tokenizer.asciidoc[]
|
||||
|
||||
include::tokenizers/simplepattern-tokenizer.asciidoc[]
|
||||
|
||||
include::tokenizers/simplepatternsplit-tokenizer.asciidoc[]
|
||||
|
|
|
@ -0,0 +1,80 @@
|
|||
[[analysis-chargroup-tokenizer]]
|
||||
=== Char Group Tokenizer
|
||||
|
||||
The `char_group` tokenizer breaks text into terms whenever it encounters a
|
||||
character which is in a defined set. It is mostly useful for cases where a simple
|
||||
custom tokenization is desired, and the overhead of use of the <<analysis-pattern-tokenizer, `pattern` tokenizer>>
|
||||
is not acceptable.
|
||||
|
||||
[float]
|
||||
=== Configuration
|
||||
|
||||
The `char_group` tokenizer accepts one parameter:
|
||||
|
||||
[horizontal]
|
||||
`tokenize_on_chars`::
|
||||
A list containing a list of characters to tokenize the string on. Whenever a character
|
||||
from this list is encountered, a new token is started. This accepts either single
|
||||
characters like eg. `-`, or character groups: `whitespace`, `letter`, `digit`,
|
||||
`punctuation`, `symbol`.
|
||||
|
||||
|
||||
[float]
|
||||
=== Example output
|
||||
|
||||
[source,js]
|
||||
---------------------------
|
||||
POST _analyze
|
||||
{
|
||||
"tokenizer": {
|
||||
"type": "char_group",
|
||||
"tokenize_on_chars": [
|
||||
"whitespace",
|
||||
"-",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
"text": "The QUICK brown-fox"
|
||||
}
|
||||
---------------------------
|
||||
// CONSOLE
|
||||
|
||||
returns
|
||||
|
||||
[source,js]
|
||||
---------------------------
|
||||
{
|
||||
"tokens": [
|
||||
{
|
||||
"token": "The",
|
||||
"start_offset": 0,
|
||||
"end_offset": 3,
|
||||
"type": "word",
|
||||
"position": 0
|
||||
},
|
||||
{
|
||||
"token": "QUICK",
|
||||
"start_offset": 4,
|
||||
"end_offset": 9,
|
||||
"type": "word",
|
||||
"position": 1
|
||||
},
|
||||
{
|
||||
"token": "brown",
|
||||
"start_offset": 10,
|
||||
"end_offset": 15,
|
||||
"type": "word",
|
||||
"position": 2
|
||||
},
|
||||
{
|
||||
"token": "fox",
|
||||
"start_offset": 16,
|
||||
"end_offset": 19,
|
||||
"type": "word",
|
||||
"position": 3
|
||||
}
|
||||
]
|
||||
}
|
||||
---------------------------
|
||||
// TESTRESPONSE
|
||||
|
|
@ -10,7 +10,7 @@ You need to use settings which are starting with `azure.client.` prefix instead.
|
|||
* Global timeout setting `cloud.azure.storage.timeout` has been removed.
|
||||
You must set it per azure client instead. Like `azure.client.default.timeout: 10s` for example.
|
||||
|
||||
See {plugins}/repository-azure-usage.html#repository-azure-repository-settings[Azure Repository settings].
|
||||
See {plugins}/repository-azure-repository-settings.html#repository-azure-repository-settings[Azure Repository settings].
|
||||
|
||||
==== Google Cloud Storage Repository plugin
|
||||
|
||||
|
|
|
@ -29,6 +29,14 @@
|
|||
[[remove-http-enabled]]
|
||||
==== Http enabled setting removed
|
||||
|
||||
The setting `http.enabled` previously allowed disabling binding to HTTP, only allowing
|
||||
* The setting `http.enabled` previously allowed disabling binding to HTTP, only allowing
|
||||
use of the transport client. This setting has been removed, as the transport client
|
||||
will be removed in the future, thus requiring HTTP to always be enabled.
|
||||
|
||||
[[remove-http-pipelining-setting]]
|
||||
==== Http pipelining setting removed
|
||||
|
||||
* The setting `http.pipelining` previously allowed disabling HTTP pipelining support.
|
||||
This setting has been removed, as disabling http pipelining support on the server
|
||||
provided little value. The setting `http.pipelining.max_events` can still be used to
|
||||
limit the number of pipelined requests in-flight.
|
||||
|
|
|
@ -96,8 +96,6 @@ and stack traces in response output. Note: When set to `false` and the `error_tr
|
|||
parameter is specified, an error will be returned; when `error_trace` is not specified, a
|
||||
simple message will be returned. Defaults to `true`
|
||||
|
||||
|`http.pipelining` |Enable or disable HTTP pipelining, defaults to `true`.
|
||||
|
||||
|`http.pipelining.max_events` |The maximum number of events to be queued up in memory before a HTTP connection is closed, defaults to `10000`.
|
||||
|
||||
|`http.max_warning_header_count` |The maximum number of warning headers in
|
||||
|
|
|
@ -76,7 +76,7 @@ memory on a node. The memory usage is based on the content length of the request
|
|||
[float]
|
||||
==== Accounting requests circuit breaker
|
||||
|
||||
The in flight requests circuit breaker allows Elasticsearch to limit the memory
|
||||
The accounting circuit breaker allows Elasticsearch to limit the memory
|
||||
usage of things held in memory that are not released when a request is
|
||||
completed. This includes things like the Lucene segment memory.
|
||||
|
||||
|
|
|
@ -124,8 +124,8 @@ the shared file system repository it is necessary to mount the same shared files
|
|||
master and data nodes. This location (or one of its parent directories) must be registered in the `path.repo`
|
||||
setting on all master and data nodes.
|
||||
|
||||
Assuming that the shared filesystem is mounted to `/mount/backups/my_backup`, the following setting should be added to
|
||||
`elasticsearch.yml` file:
|
||||
Assuming that the shared filesystem is mounted to `/mount/backups/my_fs_backup_location`, the following setting should
|
||||
be added to `elasticsearch.yml` file:
|
||||
|
||||
[source,yaml]
|
||||
--------------
|
||||
|
@ -141,7 +141,7 @@ path.repo: ["\\\\MY_SERVER\\Snapshots"]
|
|||
--------------
|
||||
|
||||
After all nodes are restarted, the following command can be used to register the shared file system repository with
|
||||
the name `my_backup`:
|
||||
the name `my_fs_backup`:
|
||||
|
||||
[source,js]
|
||||
-----------------------------------
|
||||
|
@ -419,7 +419,7 @@ A repository can be unregistered using the following command:
|
|||
|
||||
[source,sh]
|
||||
-----------------------------------
|
||||
DELETE /_snapshot/my_fs_backup
|
||||
DELETE /_snapshot/my_backup
|
||||
-----------------------------------
|
||||
// CONSOLE
|
||||
// TEST[continued]
|
||||
|
|
|
@ -15,13 +15,13 @@ GET /_search
|
|||
"test1" : {
|
||||
"script" : {
|
||||
"lang": "painless",
|
||||
"source": "doc['my_field_name'].value * 2"
|
||||
"source": "doc['price'].value * 2"
|
||||
}
|
||||
},
|
||||
"test2" : {
|
||||
"script" : {
|
||||
"lang": "painless",
|
||||
"source": "doc['my_field_name'].value * params.factor",
|
||||
"source": "doc['price'].value * params.factor",
|
||||
"params" : {
|
||||
"factor" : 2.0
|
||||
}
|
||||
|
@ -31,7 +31,7 @@ GET /_search
|
|||
}
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
// TEST[setup:sales]
|
||||
|
||||
Script fields can work on fields that are not stored (`my_field_name` in
|
||||
the above case), and allow to return custom values to be returned (the
|
||||
|
|
|
@ -33,8 +33,6 @@ publishing {
|
|||
}
|
||||
|
||||
dependencies {
|
||||
compile "org.apache.logging.log4j:log4j-api:${versions.log4j}"
|
||||
|
||||
testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}"
|
||||
testCompile "junit:junit:${versions.junit}"
|
||||
testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}"
|
||||
|
@ -64,18 +62,3 @@ forbiddenApisMain {
|
|||
// es-all is not checked as we connect and accept sockets
|
||||
signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')]
|
||||
}
|
||||
|
||||
//JarHell is part of es core, which we don't want to pull in
|
||||
jarHell.enabled=false
|
||||
|
||||
thirdPartyAudit.excludes = [
|
||||
'org/osgi/framework/AdaptPermission',
|
||||
'org/osgi/framework/AdminPermission',
|
||||
'org/osgi/framework/Bundle',
|
||||
'org/osgi/framework/BundleActivator',
|
||||
'org/osgi/framework/BundleContext',
|
||||
'org/osgi/framework/BundleEvent',
|
||||
'org/osgi/framework/SynchronousBundleListener',
|
||||
'org/osgi/framework/wiring/BundleWire',
|
||||
'org/osgi/framework/wiring/BundleWiring'
|
||||
]
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
7a2999229464e7a324aa503c0a52ec0f05efe7bd
|
|
@ -1,202 +0,0 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 1999-2005 The Apache Software Foundation
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -1,5 +0,0 @@
|
|||
Apache log4j
|
||||
Copyright 2007 The Apache Software Foundation
|
||||
|
||||
This product includes software developed at
|
||||
The Apache Software Foundation (http://www.apache.org/).
|
|
@ -19,11 +19,9 @@
|
|||
|
||||
package org.elasticsearch.nio;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.channels.SelectionKey;
|
||||
import java.util.function.Consumer;
|
||||
import java.util.function.Supplier;
|
||||
|
||||
/**
|
||||
|
@ -33,8 +31,8 @@ public class AcceptorEventHandler extends EventHandler {
|
|||
|
||||
private final Supplier<SocketSelector> selectorSupplier;
|
||||
|
||||
public AcceptorEventHandler(Logger logger, Supplier<SocketSelector> selectorSupplier) {
|
||||
super(logger);
|
||||
public AcceptorEventHandler(Supplier<SocketSelector> selectorSupplier, Consumer<Exception> exceptionHandler) {
|
||||
super(exceptionHandler);
|
||||
this.selectorSupplier = selectorSupplier;
|
||||
}
|
||||
|
||||
|
@ -58,7 +56,7 @@ public class AcceptorEventHandler extends EventHandler {
|
|||
* @param exception that occurred
|
||||
*/
|
||||
protected void registrationException(ServerChannelContext context, Exception exception) {
|
||||
logger.error(new ParameterizedMessage("failed to register server channel: {}", context.getChannel()), exception);
|
||||
context.handleException(exception);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -78,7 +76,6 @@ public class AcceptorEventHandler extends EventHandler {
|
|||
* @param exception that occurred
|
||||
*/
|
||||
protected void acceptException(ServerChannelContext context, Exception exception) {
|
||||
logger.debug(() -> new ParameterizedMessage("exception while accepting new channel from server channel: {}",
|
||||
context.getChannel()), exception);
|
||||
context.handleException(exception);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -83,7 +83,7 @@ public abstract class ESSelector implements Closeable {
|
|||
try {
|
||||
selector.close();
|
||||
} catch (IOException e) {
|
||||
eventHandler.closeSelectorException(e);
|
||||
eventHandler.selectorException(e);
|
||||
} finally {
|
||||
runLock.unlock();
|
||||
exitedLoop.countDown();
|
||||
|
@ -123,7 +123,7 @@ public abstract class ESSelector implements Closeable {
|
|||
throw e;
|
||||
}
|
||||
} catch (IOException e) {
|
||||
eventHandler.selectException(e);
|
||||
eventHandler.selectorException(e);
|
||||
} catch (Exception e) {
|
||||
eventHandler.uncaughtException(e);
|
||||
}
|
||||
|
|
|
@ -19,37 +19,26 @@
|
|||
|
||||
package org.elasticsearch.nio;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.channels.Selector;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
public abstract class EventHandler {
|
||||
|
||||
protected final Logger logger;
|
||||
protected final Consumer<Exception> exceptionHandler;
|
||||
|
||||
EventHandler(Logger logger) {
|
||||
this.logger = logger;
|
||||
protected EventHandler(Consumer<Exception> exceptionHandler) {
|
||||
this.exceptionHandler = exceptionHandler;
|
||||
}
|
||||
|
||||
/**
|
||||
* This method handles an IOException that was thrown during a call to {@link Selector#select(long)}.
|
||||
* This method handles an IOException that was thrown during a call to {@link Selector#select(long)} or
|
||||
* {@link Selector#close()}.
|
||||
*
|
||||
* @param exception the exception
|
||||
*/
|
||||
protected void selectException(IOException exception) {
|
||||
logger.warn(new ParameterizedMessage("io exception during select [thread={}]", Thread.currentThread().getName()), exception);
|
||||
}
|
||||
|
||||
/**
|
||||
* This method handles an IOException that was thrown during a call to {@link Selector#close()}.
|
||||
*
|
||||
* @param exception the exception
|
||||
*/
|
||||
protected void closeSelectorException(IOException exception) {
|
||||
logger.warn(new ParameterizedMessage("io exception while closing selector [thread={}]", Thread.currentThread().getName()),
|
||||
exception);
|
||||
protected void selectorException(IOException exception) {
|
||||
exceptionHandler.accept(exception);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -79,11 +68,11 @@ public abstract class EventHandler {
|
|||
/**
|
||||
* This method is called when an attempt to close a channel throws an exception.
|
||||
*
|
||||
* @param context that was being closed
|
||||
* @param channel that was being closed
|
||||
* @param exception that occurred
|
||||
*/
|
||||
protected void closeException(ChannelContext<?> context, Exception exception) {
|
||||
logger.debug(() -> new ParameterizedMessage("exception while closing channel: {}", context.getChannel()), exception);
|
||||
protected void closeException(ChannelContext<?> channel, Exception exception) {
|
||||
channel.handleException(exception);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -95,6 +84,6 @@ public abstract class EventHandler {
|
|||
* @param exception that was thrown
|
||||
*/
|
||||
protected void genericChannelException(ChannelContext<?> channel, Exception exception) {
|
||||
logger.debug(() -> new ParameterizedMessage("exception while handling event for channel: {}", channel.getChannel()), exception);
|
||||
channel.handleException(exception);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
|
||||
package org.elasticsearch.nio;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.nio.utils.ExceptionsHelper;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -29,7 +28,6 @@ import java.util.List;
|
|||
import java.util.concurrent.ExecutionException;
|
||||
import java.util.concurrent.ThreadFactory;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.function.Function;
|
||||
import java.util.function.Supplier;
|
||||
import java.util.stream.Collectors;
|
||||
|
@ -56,16 +54,16 @@ public class NioGroup implements AutoCloseable {
|
|||
|
||||
private final AtomicBoolean isOpen = new AtomicBoolean(true);
|
||||
|
||||
public NioGroup(Logger logger, ThreadFactory acceptorThreadFactory, int acceptorCount,
|
||||
BiFunction<Logger, Supplier<SocketSelector>, AcceptorEventHandler> acceptorEventHandlerFunction,
|
||||
public NioGroup(ThreadFactory acceptorThreadFactory, int acceptorCount,
|
||||
Function<Supplier<SocketSelector>, AcceptorEventHandler> acceptorEventHandlerFunction,
|
||||
ThreadFactory socketSelectorThreadFactory, int socketSelectorCount,
|
||||
Function<Logger, SocketEventHandler> socketEventHandlerFunction) throws IOException {
|
||||
Supplier<SocketEventHandler> socketEventHandlerFunction) throws IOException {
|
||||
acceptors = new ArrayList<>(acceptorCount);
|
||||
socketSelectors = new ArrayList<>(socketSelectorCount);
|
||||
|
||||
try {
|
||||
for (int i = 0; i < socketSelectorCount; ++i) {
|
||||
SocketSelector selector = new SocketSelector(socketEventHandlerFunction.apply(logger));
|
||||
SocketSelector selector = new SocketSelector(socketEventHandlerFunction.get());
|
||||
socketSelectors.add(selector);
|
||||
}
|
||||
startSelectors(socketSelectors, socketSelectorThreadFactory);
|
||||
|
@ -73,7 +71,7 @@ public class NioGroup implements AutoCloseable {
|
|||
for (int i = 0; i < acceptorCount; ++i) {
|
||||
SocketSelector[] childSelectors = this.socketSelectors.toArray(new SocketSelector[this.socketSelectors.size()]);
|
||||
Supplier<SocketSelector> selectorSupplier = new RoundRobinSupplier<>(childSelectors);
|
||||
AcceptingSelector acceptor = new AcceptingSelector(acceptorEventHandlerFunction.apply(logger, selectorSupplier));
|
||||
AcceptingSelector acceptor = new AcceptingSelector(acceptorEventHandlerFunction.apply(selectorSupplier));
|
||||
acceptors.add(acceptor);
|
||||
}
|
||||
startSelectors(acceptors, acceptorThreadFactory);
|
||||
|
|
|
@ -19,23 +19,17 @@
|
|||
|
||||
package org.elasticsearch.nio;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.channels.SelectionKey;
|
||||
import java.util.function.BiConsumer;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
/**
|
||||
* Event handler designed to handle events from non-server sockets
|
||||
*/
|
||||
public class SocketEventHandler extends EventHandler {
|
||||
|
||||
private final Logger logger;
|
||||
|
||||
public SocketEventHandler(Logger logger) {
|
||||
super(logger);
|
||||
this.logger = logger;
|
||||
public SocketEventHandler(Consumer<Exception> exceptionHandler) {
|
||||
super(exceptionHandler);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -62,7 +56,6 @@ public class SocketEventHandler extends EventHandler {
|
|||
* @param exception that occurred
|
||||
*/
|
||||
protected void registrationException(SocketChannelContext context, Exception exception) {
|
||||
logger.debug(() -> new ParameterizedMessage("failed to register socket channel: {}", context.getChannel()), exception);
|
||||
context.handleException(exception);
|
||||
}
|
||||
|
||||
|
@ -85,7 +78,6 @@ public class SocketEventHandler extends EventHandler {
|
|||
* @param exception that occurred
|
||||
*/
|
||||
protected void connectException(SocketChannelContext context, Exception exception) {
|
||||
logger.debug(() -> new ParameterizedMessage("failed to connect to socket channel: {}", context.getChannel()), exception);
|
||||
context.handleException(exception);
|
||||
}
|
||||
|
||||
|
@ -106,7 +98,6 @@ public class SocketEventHandler extends EventHandler {
|
|||
* @param exception that occurred
|
||||
*/
|
||||
protected void readException(SocketChannelContext context, Exception exception) {
|
||||
logger.debug(() -> new ParameterizedMessage("exception while reading from socket channel: {}", context.getChannel()), exception);
|
||||
context.handleException(exception);
|
||||
}
|
||||
|
||||
|
@ -127,18 +118,16 @@ public class SocketEventHandler extends EventHandler {
|
|||
* @param exception that occurred
|
||||
*/
|
||||
protected void writeException(SocketChannelContext context, Exception exception) {
|
||||
logger.debug(() -> new ParameterizedMessage("exception while writing to socket channel: {}", context.getChannel()), exception);
|
||||
context.handleException(exception);
|
||||
}
|
||||
|
||||
/**
|
||||
* This method is called when a listener attached to a channel operation throws an exception.
|
||||
*
|
||||
* @param listener that was called
|
||||
* @param exception that occurred
|
||||
*/
|
||||
protected <V> void listenerException(BiConsumer<V, Throwable> listener, Exception exception) {
|
||||
logger.warn(new ParameterizedMessage("exception while executing listener: {}", listener), exception);
|
||||
protected void listenerException(Exception exception) {
|
||||
exceptionHandler.accept(exception);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -143,7 +143,7 @@ public class SocketSelector extends ESSelector {
|
|||
try {
|
||||
listener.accept(value, null);
|
||||
} catch (Exception e) {
|
||||
eventHandler.listenerException(listener, e);
|
||||
eventHandler.listenerException(e);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -159,7 +159,7 @@ public class SocketSelector extends ESSelector {
|
|||
try {
|
||||
listener.accept(null, exception);
|
||||
} catch (Exception e) {
|
||||
eventHandler.listenerException(listener, e);
|
||||
eventHandler.listenerException(e);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -50,7 +50,7 @@ public class AcceptorEventHandlerTests extends ESTestCase {
|
|||
ArrayList<SocketSelector> selectors = new ArrayList<>();
|
||||
selectors.add(mock(SocketSelector.class));
|
||||
selectorSupplier = new RoundRobinSupplier<>(selectors.toArray(new SocketSelector[selectors.size()]));
|
||||
handler = new AcceptorEventHandler(logger, selectorSupplier);
|
||||
handler = new AcceptorEventHandler(selectorSupplier, mock(Consumer.class));
|
||||
|
||||
channel = new NioServerSocketChannel(mock(ServerSocketChannel.class));
|
||||
context = new DoNotRegisterContext(channel, mock(AcceptingSelector.class), mock(Consumer.class));
|
||||
|
@ -99,6 +99,14 @@ public class AcceptorEventHandlerTests extends ESTestCase {
|
|||
verify(serverChannelContext).acceptChannels(selectorSupplier);
|
||||
}
|
||||
|
||||
public void testAcceptExceptionCallsExceptionHandler() throws IOException {
|
||||
ServerChannelContext serverChannelContext = mock(ServerChannelContext.class);
|
||||
IOException exception = new IOException();
|
||||
handler.acceptException(serverChannelContext, exception);
|
||||
|
||||
verify(serverChannelContext).handleException(exception);
|
||||
}
|
||||
|
||||
private class DoNotRegisterContext extends ServerChannelContext {
|
||||
|
||||
|
||||
|
|
|
@ -27,7 +27,6 @@ import java.nio.channels.CancelledKeyException;
|
|||
import java.nio.channels.ClosedSelectorException;
|
||||
import java.nio.channels.SelectionKey;
|
||||
import java.nio.channels.Selector;
|
||||
import java.nio.channels.SocketChannel;
|
||||
|
||||
import static org.mockito.Matchers.anyInt;
|
||||
import static org.mockito.Mockito.mock;
|
||||
|
@ -81,7 +80,7 @@ public class ESSelectorTests extends ESTestCase {
|
|||
|
||||
this.selector.singleLoop();
|
||||
|
||||
verify(handler).selectException(ioException);
|
||||
verify(handler).selectorException(ioException);
|
||||
}
|
||||
|
||||
public void testSelectorClosedIfOpenAndEventLoopNotRunning() throws IOException {
|
||||
|
|
|
@ -25,6 +25,7 @@ import org.elasticsearch.test.ESTestCase;
|
|||
|
||||
import java.io.IOException;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.util.function.Consumer;
|
||||
|
||||
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
|
||||
import static org.mockito.Mockito.mock;
|
||||
|
@ -34,10 +35,12 @@ public class NioGroupTests extends ESTestCase {
|
|||
private NioGroup nioGroup;
|
||||
|
||||
@Override
|
||||
@SuppressWarnings("unchecked")
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
nioGroup = new NioGroup(logger, daemonThreadFactory(Settings.EMPTY, "acceptor"), 1, AcceptorEventHandler::new,
|
||||
daemonThreadFactory(Settings.EMPTY, "selector"), 1, SocketEventHandler::new);
|
||||
nioGroup = new NioGroup(daemonThreadFactory(Settings.EMPTY, "acceptor"), 1,
|
||||
(s) -> new AcceptorEventHandler(s, mock(Consumer.class)), daemonThreadFactory(Settings.EMPTY, "selector"), 1,
|
||||
() -> new SocketEventHandler(mock(Consumer.class)));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -69,10 +72,12 @@ public class NioGroupTests extends ESTestCase {
|
|||
nioGroup.close();
|
||||
}
|
||||
|
||||
@SuppressWarnings("unchecked")
|
||||
public void testExceptionAtStartIsHandled() throws IOException {
|
||||
RuntimeException ex = new RuntimeException();
|
||||
CheckedRunnable<IOException> ctor = () -> new NioGroup(logger, r -> {throw ex;}, 1,
|
||||
AcceptorEventHandler::new, daemonThreadFactory(Settings.EMPTY, "selector"), 1, SocketEventHandler::new);
|
||||
CheckedRunnable<IOException> ctor = () -> new NioGroup(r -> {throw ex;}, 1,
|
||||
(s) -> new AcceptorEventHandler(s, mock(Consumer.class)), daemonThreadFactory(Settings.EMPTY, "selector"),
|
||||
1, () -> new SocketEventHandler(mock(Consumer.class)));
|
||||
RuntimeException runtimeException = expectThrows(RuntimeException.class, ctor::run);
|
||||
assertSame(ex, runtimeException);
|
||||
// ctor starts threads. So we are testing that a failure to construct will stop threads. Our thread
|
||||
|
|
|
@ -36,7 +36,8 @@ import static org.mockito.Mockito.when;
|
|||
|
||||
public class SocketEventHandlerTests extends ESTestCase {
|
||||
|
||||
private Consumer<Exception> exceptionHandler;
|
||||
private Consumer<Exception> channelExceptionHandler;
|
||||
private Consumer<Exception> genericExceptionHandler;
|
||||
|
||||
private ReadWriteHandler readWriteHandler;
|
||||
private SocketEventHandler handler;
|
||||
|
@ -47,15 +48,16 @@ public class SocketEventHandlerTests extends ESTestCase {
|
|||
@Before
|
||||
@SuppressWarnings("unchecked")
|
||||
public void setUpHandler() throws IOException {
|
||||
exceptionHandler = mock(Consumer.class);
|
||||
channelExceptionHandler = mock(Consumer.class);
|
||||
genericExceptionHandler = mock(Consumer.class);
|
||||
readWriteHandler = mock(ReadWriteHandler.class);
|
||||
SocketSelector selector = mock(SocketSelector.class);
|
||||
handler = new SocketEventHandler(logger);
|
||||
handler = new SocketEventHandler(genericExceptionHandler);
|
||||
rawChannel = mock(SocketChannel.class);
|
||||
channel = new NioSocketChannel(rawChannel);
|
||||
when(rawChannel.finishConnect()).thenReturn(true);
|
||||
|
||||
context = new DoNotRegisterContext(channel, selector, exceptionHandler, new TestSelectionKey(0), readWriteHandler);
|
||||
context = new DoNotRegisterContext(channel, selector, channelExceptionHandler, new TestSelectionKey(0), readWriteHandler);
|
||||
channel.setContext(context);
|
||||
handler.handleRegistration(context);
|
||||
|
||||
|
@ -96,7 +98,7 @@ public class SocketEventHandlerTests extends ESTestCase {
|
|||
public void testRegistrationExceptionCallsExceptionHandler() throws IOException {
|
||||
CancelledKeyException exception = new CancelledKeyException();
|
||||
handler.registrationException(context, exception);
|
||||
verify(exceptionHandler).accept(exception);
|
||||
verify(channelExceptionHandler).accept(exception);
|
||||
}
|
||||
|
||||
public void testConnectDoesNotRemoveOP_CONNECTInterestIfIncomplete() throws IOException {
|
||||
|
@ -114,7 +116,7 @@ public class SocketEventHandlerTests extends ESTestCase {
|
|||
public void testConnectExceptionCallsExceptionHandler() throws IOException {
|
||||
IOException exception = new IOException();
|
||||
handler.connectException(context, exception);
|
||||
verify(exceptionHandler).accept(exception);
|
||||
verify(channelExceptionHandler).accept(exception);
|
||||
}
|
||||
|
||||
public void testHandleReadDelegatesToContext() throws IOException {
|
||||
|
@ -130,13 +132,13 @@ public class SocketEventHandlerTests extends ESTestCase {
|
|||
public void testReadExceptionCallsExceptionHandler() {
|
||||
IOException exception = new IOException();
|
||||
handler.readException(context, exception);
|
||||
verify(exceptionHandler).accept(exception);
|
||||
verify(channelExceptionHandler).accept(exception);
|
||||
}
|
||||
|
||||
public void testWriteExceptionCallsExceptionHandler() {
|
||||
IOException exception = new IOException();
|
||||
handler.writeException(context, exception);
|
||||
verify(exceptionHandler).accept(exception);
|
||||
verify(channelExceptionHandler).accept(exception);
|
||||
}
|
||||
|
||||
public void testPostHandlingCallWillCloseTheChannelIfReady() throws IOException {
|
||||
|
@ -192,6 +194,12 @@ public class SocketEventHandlerTests extends ESTestCase {
|
|||
assertEquals(SelectionKey.OP_READ, key.interestOps());
|
||||
}
|
||||
|
||||
public void testListenerExceptionCallsGenericExceptionHandler() throws IOException {
|
||||
RuntimeException listenerException = new RuntimeException();
|
||||
handler.listenerException(listenerException);
|
||||
verify(genericExceptionHandler).accept(listenerException);
|
||||
}
|
||||
|
||||
private class DoNotRegisterContext extends BytesChannelContext {
|
||||
|
||||
private final TestSelectionKey selectionKey;
|
||||
|
|
|
@ -297,7 +297,7 @@ public class SocketSelectorTests extends ESTestCase {
|
|||
|
||||
socketSelector.executeListener(listener, null);
|
||||
|
||||
verify(eventHandler).listenerException(listener, exception);
|
||||
verify(eventHandler).listenerException(exception);
|
||||
}
|
||||
|
||||
public void testExecuteFailedListenerWillHandleException() throws Exception {
|
||||
|
@ -307,6 +307,6 @@ public class SocketSelectorTests extends ESTestCase {
|
|||
|
||||
socketSelector.executeFailedListener(listener, ioException);
|
||||
|
||||
verify(eventHandler).listenerException(listener, exception);
|
||||
verify(eventHandler).listenerException(exception);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,135 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.analysis.common;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.util.CharTokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.analysis.AbstractTokenizerFactory;

import java.util.HashSet;
import java.util.Set;

public class CharGroupTokenizerFactory extends AbstractTokenizerFactory {

    private final Set<Integer> tokenizeOnChars = new HashSet<>();
    private boolean tokenizeOnSpace = false;
    private boolean tokenizeOnLetter = false;
    private boolean tokenizeOnDigit = false;
    private boolean tokenizeOnPunctuation = false;
    private boolean tokenizeOnSymbol = false;

    public CharGroupTokenizerFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) {
        super(indexSettings, name, settings);

        for (final String c : settings.getAsList("tokenize_on_chars")) {
            if (c == null || c.length() == 0) {
                throw new RuntimeException("[tokenize_on_chars] cannot contain empty characters");
            }

            if (c.length() == 1) {
                tokenizeOnChars.add((int) c.charAt(0));
            } else if (c.charAt(0) == '\\') {
                tokenizeOnChars.add((int) parseEscapedChar(c));
            } else {
                switch (c) {
                    case "letter":
                        tokenizeOnLetter = true;
                        break;
                    case "digit":
                        tokenizeOnDigit = true;
                        break;
                    case "whitespace":
                        tokenizeOnSpace = true;
                        break;
                    case "punctuation":
                        tokenizeOnPunctuation = true;
                        break;
                    case "symbol":
                        tokenizeOnSymbol = true;
                        break;
                    default:
                        throw new RuntimeException("Invalid escaped char in [" + c + "]");
                }
            }
        }
    }

    private char parseEscapedChar(final String s) {
        int len = s.length();
        char c = s.charAt(0);
        if (c == '\\') {
            if (1 >= len)
                throw new RuntimeException("Invalid escaped char in [" + s + "]");
            c = s.charAt(1);
            switch (c) {
                case '\\':
                    return '\\';
                case 'n':
                    return '\n';
                case 't':
                    return '\t';
                case 'r':
                    return '\r';
                case 'b':
                    return '\b';
                case 'f':
                    return '\f';
                case 'u':
                    if (len > 6) {
                        throw new RuntimeException("Invalid escaped char in [" + s + "]");
                    }
                    return (char) Integer.parseInt(s.substring(2), 16);
                default:
                    throw new RuntimeException("Invalid escaped char " + c + " in [" + s + "]");
            }
        } else {
            throw new RuntimeException("Invalid escaped char [" + s + "]");
        }
    }

    @Override
    public Tokenizer create() {
        return new CharTokenizer() {
            @Override
            protected boolean isTokenChar(int c) {
                if (tokenizeOnSpace && Character.isWhitespace(c)) {
                    return false;
                }
                if (tokenizeOnLetter && Character.isLetter(c)) {
                    return false;
                }
                if (tokenizeOnDigit && Character.isDigit(c)) {
                    return false;
                }
                if (tokenizeOnPunctuation && CharMatcher.Basic.PUNCTUATION.isTokenChar(c)) {
                    return false;
                }
                if (tokenizeOnSymbol && CharMatcher.Basic.SYMBOL.isTokenChar(c)) {
                    return false;
                }
                return !tokenizeOnChars.contains(c);
            }
        };
    }
}
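The factory above compiles its configuration into a `CharTokenizer` predicate: a character stays inside a token unless it matches one of the enabled groups or one of the explicit break characters. The toy re-implementation below (plain JDK, not the Lucene tokenizer itself) reproduces the splitting rule for the same configuration that `testTokenization` further down pins against `"foo bar $34 test:test2"`:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

public class CharGroupSketch {
    // Mirrors the factory's isTokenChar: break on whitespace (group) or on explicit chars.
    static boolean isTokenChar(int c, boolean onWhitespace, Set<Integer> breakChars) {
        if (onWhitespace && Character.isWhitespace(c)) {
            return false;
        }
        return !breakChars.contains(c);
    }

    public static void main(String[] args) {
        // Equivalent of tokenize_on_chars: ["whitespace", ":", "\\u0024"]  ('\u0024' is '$')
        Set<Integer> breakChars = Set.of((int) ':', (int) '$');
        String input = "foo bar $34 test:test2";
        List<String> tokens = new ArrayList<>();
        StringBuilder current = new StringBuilder();
        for (int i = 0; i < input.length(); i++) {
            char ch = input.charAt(i);
            if (isTokenChar(ch, true, breakChars)) {
                current.append(ch);
            } else if (current.length() > 0) {
                tokens.add(current.toString());   // flush token at a break character
                current.setLength(0);
            }
        }
        if (current.length() > 0) {
            tokens.add(current.toString());
        }
        System.out.println(tokens); // [foo, bar, 34, test, test2]
    }
}
```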
@@ -184,6 +184,7 @@ public class CommonAnalysisPlugin extends Plugin implements AnalysisPlugin {
         tokenizers.put("ngram", NGramTokenizerFactory::new);
         tokenizers.put("edgeNGram", EdgeNGramTokenizerFactory::new);
         tokenizers.put("edge_ngram", EdgeNGramTokenizerFactory::new);
+        tokenizers.put("char_group", CharGroupTokenizerFactory::new);
         tokenizers.put("classic", ClassicTokenizerFactory::new);
         tokenizers.put("letter", LetterTokenizerFactory::new);
         tokenizers.put("lowercase", LowerCaseTokenizerFactory::new);
@@ -0,0 +1,74 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.analysis.common;

import org.apache.lucene.analysis.Tokenizer;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.test.ESTokenStreamTestCase;
import org.elasticsearch.test.IndexSettingsModule;

import java.io.IOException;
import java.io.StringReader;
import java.util.Arrays;

public class CharGroupTokenizerFactoryTests extends ESTokenStreamTestCase {
    public void testParseTokenChars() {
        final Index index = new Index("test", "_na_");
        final Settings indexSettings = newAnalysisSettingsBuilder().build();
        IndexSettings indexProperties = IndexSettingsModule.newIndexSettings(index, indexSettings);
        final String name = "cg";
        for (String[] conf : Arrays.asList(
                new String[] { "\\v" },
                new String[] { "\\u00245" },
                new String[] { "commas" },
                new String[] { "a", "b", "c", "\\$" })) {
            final Settings settings = newAnalysisSettingsBuilder().putList("tokenize_on_chars", conf).build();
            expectThrows(RuntimeException.class, () -> new CharGroupTokenizerFactory(indexProperties, null, name, settings).create());
        }

        for (String[] conf : Arrays.asList(
                new String[0],
                new String[] { "\\n" },
                new String[] { "\\u0024" },
                new String[] { "whitespace" },
                new String[] { "a", "b", "c" },
                new String[] { "a", "b", "c", "\\r" },
                new String[] { "\\r" },
                new String[] { "f", "o", "o", "symbol" })) {
            final Settings settings = newAnalysisSettingsBuilder().putList("tokenize_on_chars", Arrays.asList(conf)).build();
            new CharGroupTokenizerFactory(indexProperties, null, name, settings).create();
            // no exception
        }
    }

    public void testTokenization() throws IOException {
        final Index index = new Index("test", "_na_");
        final String name = "cg";
        final Settings indexSettings = newAnalysisSettingsBuilder().build();
        final Settings settings = newAnalysisSettingsBuilder().putList("tokenize_on_chars", "whitespace", ":", "\\u0024").build();
        Tokenizer tokenizer = new CharGroupTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings),
                null, name, settings).create();
        tokenizer.setReader(new StringReader("foo bar $34 test:test2"));
        assertTokenStreamContents(tokenizer, new String[] {"foo", "bar", "34", "test", "test2"});
    }
}
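Taken together, the two tests above pin down the user-visible contract of the new `char_group` tokenizer registered in `CommonAnalysisPlugin`: `tokenize_on_chars` accepts single characters, escape sequences such as `\n` or `\u0024`, and the group names `letter`, `digit`, `whitespace`, `punctuation`, and `symbol`. Anything else (`\v`, `commas`, an over-long unicode escape like `\u00245`, or `\$`) is rejected when the factory is created, and the configuration `["whitespace", ":", "\u0024"]` splits `foo bar $34 test:test2` into `foo`, `bar`, `34`, `test`, `test2`.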
@@ -1 +0,0 @@
-a3dba337d06e1f5930cb7ae638c1655b99ce0cb7

@@ -0,0 +1 @@
+1e28b448387ec05d655f8c81ee54e13ff2975a4d
@@ -42,7 +42,6 @@ import org.elasticsearch.common.lease.Releasable;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.http.HttpHandlingSettings;
 import org.elasticsearch.http.netty4.cors.Netty4CorsHandler;
-import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest;
 import org.elasticsearch.rest.AbstractRestChannel;
 import org.elasticsearch.rest.RestResponse;
 import org.elasticsearch.rest.RestStatus;

@@ -59,29 +58,24 @@ final class Netty4HttpChannel extends AbstractRestChannel {
     private final Netty4HttpServerTransport transport;
     private final Channel channel;
     private final FullHttpRequest nettyRequest;
-    private final HttpPipelinedRequest pipelinedRequest;
+    private final int sequence;
     private final ThreadContext threadContext;
     private final HttpHandlingSettings handlingSettings;

     /**
-     * @param transport The corresponding <code>NettyHttpServerTransport</code> where this channel belongs to.
-     * @param request The request that is handled by this channel.
-     * @param pipelinedRequest If HTTP pipelining is enabled provide the corresponding pipelined request. May be null if
-     *                         HTTP pipelining is disabled.
-     * @param handlingSettings true iff error messages should include stack traces.
-     * @param threadContext the thread context for the channel
+     * @param transport        The corresponding <code>NettyHttpServerTransport</code> where this channel belongs to.
+     * @param request          The request that is handled by this channel.
+     * @param sequence         The pipelining sequence number for this request
+     * @param handlingSettings true if error messages should include stack traces.
+     * @param threadContext    the thread context for the channel
      */
-    Netty4HttpChannel(
-            final Netty4HttpServerTransport transport,
-            final Netty4HttpRequest request,
-            final HttpPipelinedRequest pipelinedRequest,
-            final HttpHandlingSettings handlingSettings,
-            final ThreadContext threadContext) {
+    Netty4HttpChannel(Netty4HttpServerTransport transport, Netty4HttpRequest request, int sequence, HttpHandlingSettings handlingSettings,
+                      ThreadContext threadContext) {
         super(request, handlingSettings.getDetailedErrorsEnabled());
         this.transport = transport;
         this.channel = request.getChannel();
         this.nettyRequest = request.request();
-        this.pipelinedRequest = pipelinedRequest;
+        this.sequence = sequence;
         this.threadContext = threadContext;
         this.handlingSettings = handlingSettings;
     }

@@ -129,7 +123,7 @@ final class Netty4HttpChannel extends AbstractRestChannel {
             final ChannelPromise promise = channel.newPromise();

             if (releaseContent) {
-                promise.addListener(f -> ((Releasable)content).close());
+                promise.addListener(f -> ((Releasable) content).close());
             }

             if (releaseBytesStreamOutput) {

@@ -140,13 +134,9 @@ final class Netty4HttpChannel extends AbstractRestChannel {
                 promise.addListener(ChannelFutureListener.CLOSE);
             }

-            final Object msg;
-            if (pipelinedRequest != null) {
-                msg = pipelinedRequest.createHttpResponse(resp, promise);
-            } else {
-                msg = resp;
-            }
-            channel.writeAndFlush(msg, promise);
+            Netty4HttpResponse newResponse = new Netty4HttpResponse(sequence, resp);
+
+            channel.writeAndFlush(newResponse, promise);
             releaseContent = false;
             releaseBytesStreamOutput = false;
         } finally {

@@ -156,9 +146,6 @@ final class Netty4HttpChannel extends AbstractRestChannel {
             if (releaseBytesStreamOutput) {
                 bytesOutputOrNull().close();
             }
-            if (pipelinedRequest != null) {
-                pipelinedRequest.release();
-            }
         }
     }
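The constructor change above replaces the nullable `HttpPipelinedRequest` with a plain sequence number: since pipelining is now always on, the channel no longer needs to know whether a request was pipelined, it only has to tag the outgoing response with the sequence taken from the request. A simplified sketch of that contract, with hypothetical types (the real `Netty4HttpResponse` appears as a new file below):

```java
// Sketch: the response is paired with the sequence number copied from the request,
// so the downstream pipelining handler can decide when it may be written.
final class SequencedResponse {
    final int sequence;   // assigned to the request in arrival order
    final Object payload; // the actual HTTP response

    SequencedResponse(int sequence, Object payload) {
        this.sequence = sequence;
        this.payload = payload;
    }
}
```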
@@ -0,0 +1,102 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.netty4;

import io.netty.channel.ChannelDuplexHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http.LastHttpContent;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.http.HttpPipelinedRequest;
import org.elasticsearch.http.HttpPipeliningAggregator;
import org.elasticsearch.transport.netty4.Netty4Utils;

import java.nio.channels.ClosedChannelException;
import java.util.Collections;
import java.util.List;

/**
 * Implements HTTP pipelining ordering, ensuring that responses are completely served in the same order as their corresponding requests.
 */
public class Netty4HttpPipeliningHandler extends ChannelDuplexHandler {

    private final Logger logger;
    private final HttpPipeliningAggregator<Netty4HttpResponse, ChannelPromise> aggregator;

    /**
     * Construct a new pipelining handler; this handler should be used downstream of HTTP decoding/aggregation.
     *
     * @param logger        for logging unexpected errors
     * @param maxEventsHeld the maximum number of channel events that will be retained prior to aborting the channel connection; this is
     *                      required as events cannot queue up indefinitely
     */
    public Netty4HttpPipeliningHandler(Logger logger, final int maxEventsHeld) {
        this.logger = logger;
        this.aggregator = new HttpPipeliningAggregator<>(maxEventsHeld);
    }

    @Override
    public void channelRead(final ChannelHandlerContext ctx, final Object msg) {
        if (msg instanceof LastHttpContent) {
            HttpPipelinedRequest<LastHttpContent> pipelinedRequest = aggregator.read(((LastHttpContent) msg).retain());
            ctx.fireChannelRead(pipelinedRequest);
        } else {
            ctx.fireChannelRead(msg);
        }
    }

    @Override
    public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) {
        assert msg instanceof Netty4HttpResponse : "Message must be type: " + Netty4HttpResponse.class;
        Netty4HttpResponse response = (Netty4HttpResponse) msg;
        boolean success = false;
        try {
            List<Tuple<Netty4HttpResponse, ChannelPromise>> readyResponses = aggregator.write(response, promise);
            for (Tuple<Netty4HttpResponse, ChannelPromise> readyResponse : readyResponses) {
                ctx.write(readyResponse.v1().getResponse(), readyResponse.v2());
            }
            success = true;
        } catch (IllegalStateException e) {
            ctx.channel().close();
        } finally {
            if (success == false) {
                promise.setFailure(new ClosedChannelException());
            }
        }
    }

    @Override
    public void close(ChannelHandlerContext ctx, ChannelPromise promise) {
        List<Tuple<Netty4HttpResponse, ChannelPromise>> inflightResponses = aggregator.removeAllInflightResponses();

        if (inflightResponses.isEmpty() == false) {
            ClosedChannelException closedChannelException = new ClosedChannelException();
            for (Tuple<Netty4HttpResponse, ChannelPromise> inflightResponse : inflightResponses) {
                try {
                    inflightResponse.v2().setFailure(closedChannelException);
                } catch (RuntimeException e) {
                    logger.error("unexpected error while releasing pipelined http responses", e);
                }
            }
        }
        ctx.close(promise);
    }
}
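The new handler delegates the actual ordering to `HttpPipeliningAggregator`, whose implementation is not part of this diff. The invariant it must enforce is the one stated in the class javadoc: a response may only be written once every response with a lower sequence number has already been written. A standalone sketch of that invariant (illustration only, plain JDK):

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

public class PipelineOrderingSketch {
    public static void main(String[] args) {
        // Held responses, ordered by sequence number (lowest first).
        PriorityQueue<int[]> held = new PriorityQueue<>(Comparator.comparingInt(r -> r[0]));
        int writeSequence = 0;
        List<Integer> written = new ArrayList<>();
        for (int completed : new int[] {2, 0, 3, 1}) { // responses complete out of order
            held.add(new int[] {completed});
            // Drain only while the head matches the next expected sequence.
            while (!held.isEmpty() && held.peek()[0] == writeSequence) {
                written.add(held.poll()[0]);
                writeSequence++;
            }
        }
        System.out.println(written); // [0, 1, 2, 3] -- request order, not completion order
    }
}
```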
@@ -30,41 +30,30 @@ import io.netty.handler.codec.http.FullHttpRequest;
 import io.netty.handler.codec.http.HttpHeaders;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.http.HttpHandlingSettings;
-import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest;
+import org.elasticsearch.http.HttpPipelinedRequest;
 import org.elasticsearch.rest.RestRequest;
 import org.elasticsearch.transport.netty4.Netty4Utils;

 import java.util.Collections;

 @ChannelHandler.Sharable
-class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<Object> {
+class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<FullHttpRequest>> {

     private final Netty4HttpServerTransport serverTransport;
     private final HttpHandlingSettings handlingSettings;
-    private final boolean httpPipeliningEnabled;
     private final ThreadContext threadContext;

     Netty4HttpRequestHandler(Netty4HttpServerTransport serverTransport, HttpHandlingSettings handlingSettings,
                              ThreadContext threadContext) {
         this.serverTransport = serverTransport;
-        this.httpPipeliningEnabled = serverTransport.pipelining;
         this.handlingSettings = handlingSettings;
         this.threadContext = threadContext;
     }

     @Override
-    protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception {
-        final FullHttpRequest request;
-        final HttpPipelinedRequest pipelinedRequest;
-        if (this.httpPipeliningEnabled && msg instanceof HttpPipelinedRequest) {
-            pipelinedRequest = (HttpPipelinedRequest) msg;
-            request = (FullHttpRequest) pipelinedRequest.last();
-        } else {
-            pipelinedRequest = null;
-            request = (FullHttpRequest) msg;
-        }
+    protected void channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest<FullHttpRequest> msg) throws Exception {
+        final FullHttpRequest request = msg.getRequest();

         boolean success = false;
         try {

             final FullHttpRequest copy =

@@ -111,7 +100,7 @@ class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<Object> {
             Netty4HttpChannel innerChannel;
             try {
                 innerChannel =
-                    new Netty4HttpChannel(serverTransport, httpRequest, pipelinedRequest, handlingSettings, threadContext);
+                    new Netty4HttpChannel(serverTransport, httpRequest, msg.getSequence(), handlingSettings, threadContext);
             } catch (final IllegalArgumentException e) {
                 if (badRequestCause == null) {
                     badRequestCause = e;

@@ -126,7 +115,7 @@ class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<Object> {
                         copy,
                         ctx.channel());
                 innerChannel =
-                    new Netty4HttpChannel(serverTransport, innerRequest, pipelinedRequest, handlingSettings, threadContext);
+                    new Netty4HttpChannel(serverTransport, innerRequest, msg.getSequence(), handlingSettings, threadContext);
             }
             channel = innerChannel;
         }

@@ -138,12 +127,9 @@ class Netty4HttpRequestHandler extends SimpleChannelInboundHandler<Object> {
             } else {
                 serverTransport.dispatchRequest(httpRequest, channel);
             }
             success = true;
         } finally {
-            // the request is otherwise released in case of dispatch
-            if (success == false && pipelinedRequest != null) {
-                pipelinedRequest.release();
-            }
+            // As we have copied the buffer, we can release the request
+            request.release();
         }
     }
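Narrowing the generic parameter from `Object` to `HttpPipelinedRequest<FullHttpRequest>` is what allows all of the `instanceof` branching above to be deleted: Netty's `SimpleChannelInboundHandler` only delivers messages assignable to its type parameter to `channelRead0` and forwards everything else down the pipeline. A minimal demonstration of that mechanism (Netty 4, self-contained):

```java
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.embedded.EmbeddedChannel;

public class TypedHandlerSketch {
    public static void main(String[] args) {
        EmbeddedChannel channel = new EmbeddedChannel(new SimpleChannelInboundHandler<String>() {
            @Override
            protected void channelRead0(ChannelHandlerContext ctx, String msg) {
                System.out.println("handled: " + msg); // only Strings arrive here
            }
        });
        channel.writeInbound("a pipelined request"); // delivered to channelRead0
        channel.writeInbound(42); // not a String: skipped by this handler, forwarded on
    }
}
```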
@@ -0,0 +1,37 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.netty4;

import io.netty.handler.codec.http.FullHttpResponse;
import org.elasticsearch.http.HttpPipelinedMessage;

public class Netty4HttpResponse extends HttpPipelinedMessage {

    private final FullHttpResponse response;

    public Netty4HttpResponse(int sequence, FullHttpResponse response) {
        super(sequence);
        this.response = response;
    }

    public FullHttpResponse getResponse() {
        return response;
    }
}
@@ -62,7 +62,6 @@ import org.elasticsearch.http.HttpStats;
 import org.elasticsearch.http.netty4.cors.Netty4CorsConfig;
 import org.elasticsearch.http.netty4.cors.Netty4CorsConfigBuilder;
 import org.elasticsearch.http.netty4.cors.Netty4CorsHandler;
-import org.elasticsearch.http.netty4.pipelining.HttpPipeliningHandler;
 import org.elasticsearch.rest.RestUtils;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.netty4.Netty4OpenChannelsHandler;

@@ -99,7 +98,6 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_NO_DELAY;
 import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE;
 import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS;
 import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE;
-import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING;
 import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS;
 import static org.elasticsearch.http.netty4.cors.Netty4CorsHandler.ANY_ORIGIN;

@@ -162,8 +160,6 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {

     protected final int workerCount;

-    protected final boolean pipelining;
-
     protected final int pipeliningMaxEvents;

     /**

@@ -204,6 +200,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
         this.maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
         this.maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
         this.maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings);
+        this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings);
         this.httpHandlingSettings = new HttpHandlingSettings(Math.toIntExact(maxContentLength.getBytes()),
             Math.toIntExact(maxChunkSize.getBytes()),
             Math.toIntExact(maxHeaderSize.getBytes()),

@@ -211,7 +208,8 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
             SETTING_HTTP_RESET_COOKIES.get(settings),
             SETTING_HTTP_COMPRESSION.get(settings),
             SETTING_HTTP_COMPRESSION_LEVEL.get(settings),
-            SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings));
+            SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings),
+            pipeliningMaxEvents);

         this.maxCompositeBufferComponents = SETTING_HTTP_NETTY_MAX_COMPOSITE_BUFFER_COMPONENTS.get(settings);
         this.workerCount = SETTING_HTTP_WORKER_COUNT.get(settings);

@@ -226,14 +224,12 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
         ByteSizeValue receivePredictor = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE.get(settings);
         recvByteBufAllocator = new FixedRecvByteBufAllocator(receivePredictor.bytesAsInt());

-        this.pipelining = SETTING_PIPELINING.get(settings);
-        this.pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings);
         this.corsConfig = buildCorsConfig(settings);

         logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}], " +
-                "receive_predictor[{}], max_composite_buffer_components[{}], pipelining[{}], pipelining_max_events[{}]",
-            maxChunkSize, maxHeaderSize, maxInitialLineLength, this.maxContentLength, receivePredictor, maxCompositeBufferComponents,
-            pipelining, pipeliningMaxEvents);
+                "receive_predictor[{}], max_composite_buffer_components[{}], pipelining_max_events[{}]",
+            maxChunkSize, maxHeaderSize, maxInitialLineLength, maxContentLength, receivePredictor, maxCompositeBufferComponents,
+            pipeliningMaxEvents);
     }

     public Settings settings() {

@@ -452,9 +448,7 @@ public class Netty4HttpServerTransport extends AbstractHttpServerTransport {
             if (SETTING_CORS_ENABLED.get(transport.settings())) {
                 ch.pipeline().addLast("cors", new Netty4CorsHandler(transport.getCorsConfig()));
            }
-            if (transport.pipelining) {
-                ch.pipeline().addLast("pipelining", new HttpPipeliningHandler(transport.logger, transport.pipeliningMaxEvents));
-            }
+            ch.pipeline().addLast("pipelining", new Netty4HttpPipeliningHandler(transport.logger, transport.pipeliningMaxEvents));
             ch.pipeline().addLast("handler", requestHandler);
         }
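With the `SETTING_PIPELINING` toggle deleted, the pipelining handler is unconditionally installed in every channel pipeline, and the only remaining configuration is the bound on queued events, read via `SETTING_PIPELINING_MAX_EVENTS` above. A sketch of what a caller can still tune; the literal setting key string is an assumption of this sketch, taken from the `pipelining_max_events` log tag:

```java
import org.elasticsearch.common.settings.Settings;

public class PipeliningSettingsSketch {
    public static void main(String[] args) {
        // The on/off flag is gone; only the per-channel event bound remains.
        Settings settings = Settings.builder()
            .put("http.pipelining.max_events", 10000) // cap on held responses before the channel is closed
            .build();
        System.out.println(settings.get("http.pipelining.max_events"));
    }
}
```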
@@ -1,88 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.netty4.pipelining;

import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.LastHttpContent;
import io.netty.util.ReferenceCounted;

/**
 * Permits downstream channel events to be ordered and signalled as to whether more are to come for
 * a given sequence.
 */
public class HttpPipelinedRequest implements ReferenceCounted {

    private final LastHttpContent last;
    private final int sequence;

    public HttpPipelinedRequest(final LastHttpContent last, final int sequence) {
        this.last = last;
        this.sequence = sequence;
    }

    public LastHttpContent last() {
        return last;
    }

    public HttpPipelinedResponse createHttpResponse(final FullHttpResponse response, final ChannelPromise promise) {
        return new HttpPipelinedResponse(response, promise, sequence);
    }

    @Override
    public int refCnt() {
        return last.refCnt();
    }

    @Override
    public ReferenceCounted retain() {
        last.retain();
        return this;
    }

    @Override
    public ReferenceCounted retain(int increment) {
        last.retain(increment);
        return this;
    }

    @Override
    public ReferenceCounted touch() {
        last.touch();
        return this;
    }

    @Override
    public ReferenceCounted touch(Object hint) {
        last.touch(hint);
        return this;
    }

    @Override
    public boolean release() {
        return last.release();
    }

    @Override
    public boolean release(int decrement) {
        return last.release(decrement);
    }

}
@@ -1,94 +0,0 @@
package org.elasticsearch.http.netty4.pipelining;

/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.util.ReferenceCounted;

class HttpPipelinedResponse implements Comparable<HttpPipelinedResponse>, ReferenceCounted {

    private final FullHttpResponse response;
    private final ChannelPromise promise;
    private final int sequence;

    HttpPipelinedResponse(FullHttpResponse response, ChannelPromise promise, int sequence) {
        this.response = response;
        this.promise = promise;
        this.sequence = sequence;
    }

    public FullHttpResponse response() {
        return response;
    }

    public ChannelPromise promise() {
        return promise;
    }

    public int sequence() {
        return sequence;
    }

    @Override
    public int compareTo(HttpPipelinedResponse o) {
        return Integer.compare(sequence, o.sequence);
    }

    @Override
    public int refCnt() {
        return response.refCnt();
    }

    @Override
    public ReferenceCounted retain() {
        response.retain();
        return this;
    }

    @Override
    public ReferenceCounted retain(int increment) {
        response.retain(increment);
        return this;
    }

    @Override
    public ReferenceCounted touch() {
        response.touch();
        return this;
    }

    @Override
    public ReferenceCounted touch(Object hint) {
        response.touch(hint);
        return this;
    }

    @Override
    public boolean release() {
        return response.release();
    }

    @Override
    public boolean release(int decrement) {
        return response.release(decrement);
    }

}
@@ -1,144 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.http.netty4.pipelining;

import io.netty.channel.ChannelDuplexHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPromise;
import io.netty.handler.codec.http.LastHttpContent;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.transport.netty4.Netty4Utils;

import java.nio.channels.ClosedChannelException;
import java.util.Collections;
import java.util.PriorityQueue;

/**
 * Implements HTTP pipelining ordering, ensuring that responses are completely served in the same order as their corresponding requests.
 */
public class HttpPipeliningHandler extends ChannelDuplexHandler {

    // we use a priority queue so that responses are ordered by their sequence number
    private final PriorityQueue<HttpPipelinedResponse> holdingQueue;

    private final Logger logger;
    private final int maxEventsHeld;

    /*
     * The current read and write sequence numbers. Read sequence numbers are attached to requests in the order they are read from the
     * channel, and then transferred to responses. A response is not written to the channel context until its sequence number matches the
     * current write sequence, implying that all preceding messages have been written.
     */
    private int readSequence;
    private int writeSequence;

    /**
     * Construct a new pipelining handler; this handler should be used downstream of HTTP decoding/aggregation.
     *
     * @param logger        for logging unexpected errors
     * @param maxEventsHeld the maximum number of channel events that will be retained prior to aborting the channel connection; this is
     *                      required as events cannot queue up indefinitely
     */
    public HttpPipeliningHandler(Logger logger, final int maxEventsHeld) {
        this.logger = logger;
        this.maxEventsHeld = maxEventsHeld;
        this.holdingQueue = new PriorityQueue<>(1);
    }

    @Override
    public void channelRead(final ChannelHandlerContext ctx, final Object msg) throws Exception {
        if (msg instanceof LastHttpContent) {
            ctx.fireChannelRead(new HttpPipelinedRequest(((LastHttpContent) msg).retain(), readSequence++));
        } else {
            ctx.fireChannelRead(msg);
        }
    }

    @Override
    public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) throws Exception {
        if (msg instanceof HttpPipelinedResponse) {
            final HttpPipelinedResponse current = (HttpPipelinedResponse) msg;
            /*
             * We attach the promise to the response. When we invoke a write on the channel with the response, we must ensure that we invoke
             * the write methods that accept the same promise that we have attached to the response otherwise as the response proceeds
             * through the handler pipeline a different promise will be used until reaching this handler. Therefore, we assert here that the
             * attached promise is identical to the provided promise as a safety mechanism that we are respecting this.
             */
            assert current.promise() == promise;

            boolean channelShouldClose = false;

            synchronized (holdingQueue) {
                if (holdingQueue.size() < maxEventsHeld) {
                    holdingQueue.add(current);

                    while (!holdingQueue.isEmpty()) {
                        /*
                         * Since the response with the lowest sequence number is the top of the priority queue, we know if its sequence
                         * number does not match the current write sequence number then we have not processed all preceding responses yet.
                         */
                        final HttpPipelinedResponse top = holdingQueue.peek();
                        if (top.sequence() != writeSequence) {
                            break;
                        }
                        holdingQueue.remove();
                        /*
                         * We must use the promise attached to the response; this is necessary since are going to hold a response until all
                         * responses that precede it in the pipeline are written first. Note that the promise from the method invocation is
                         * not ignored, it will already be attached to an existing response and consumed when that response is drained.
                         */
                        ctx.write(top.response(), top.promise());
                        writeSequence++;
                    }
                } else {
                    channelShouldClose = true;
                }
            }

            if (channelShouldClose) {
                try {
                    Netty4Utils.closeChannels(Collections.singletonList(ctx.channel()));
                } finally {
                    current.release();
                    promise.setSuccess();
                }
            }
        } else {
            ctx.write(msg, promise);
        }
    }

    @Override
    public void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception {
        if (holdingQueue.isEmpty() == false) {
            ClosedChannelException closedChannelException = new ClosedChannelException();
            HttpPipelinedResponse pipelinedResponse;
            while ((pipelinedResponse = holdingQueue.poll()) != null) {
                try {
                    pipelinedResponse.release();
                    pipelinedResponse.promise().setFailure(closedChannelException);
                } catch (Exception e) {
                    logger.error("unexpected error while releasing pipelined http responses", e);
                }
            }
        }
        ctx.close(promise);
    }
}
@@ -60,7 +60,6 @@ import org.elasticsearch.http.HttpHandlingSettings;
 import org.elasticsearch.http.HttpTransportSettings;
 import org.elasticsearch.http.NullDispatcher;
 import org.elasticsearch.http.netty4.cors.Netty4CorsHandler;
-import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
 import org.elasticsearch.rest.BytesRestResponse;
 import org.elasticsearch.rest.RestResponse;

@@ -212,12 +211,12 @@ public class Netty4HttpChannelTests extends ESTestCase {
         final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
         httpRequest.headers().add(HttpHeaderNames.ORIGIN, "remote");
         final WriteCapturingChannel writeCapturingChannel = new WriteCapturingChannel();
-        Netty4HttpRequest request = new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel);
+        final Netty4HttpRequest request = new Netty4HttpRequest(xContentRegistry(), httpRequest, writeCapturingChannel);
         HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings;

         // send a response
         Netty4HttpChannel channel =
-            new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext());
+            new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext());
         TestResponse resp = new TestResponse();
         final String customHeader = "custom-header";
         final String customHeaderValue = "xyz";

@@ -227,7 +226,7 @@ public class Netty4HttpChannelTests extends ESTestCase {
         // inspect what was written
         List<Object> writtenObjects = writeCapturingChannel.getWrittenObjects();
         assertThat(writtenObjects.size(), is(1));
-        HttpResponse response = (HttpResponse) writtenObjects.get(0);
+        HttpResponse response = ((Netty4HttpResponse) writtenObjects.get(0)).getResponse();
         assertThat(response.headers().get("non-existent-header"), nullValue());
         assertThat(response.headers().get(customHeader), equalTo(customHeaderValue));
         assertThat(response.headers().get(HttpHeaderNames.CONTENT_LENGTH), equalTo(Integer.toString(resp.content().length())));

@@ -243,10 +242,9 @@ public class Netty4HttpChannelTests extends ESTestCase {
         final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
         final EmbeddedChannel embeddedChannel = new EmbeddedChannel();
         final Netty4HttpRequest request = new Netty4HttpRequest(registry, httpRequest, embeddedChannel);
-        final HttpPipelinedRequest pipelinedRequest = randomBoolean() ? new HttpPipelinedRequest(request.request(), 1) : null;
         HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings;
         final Netty4HttpChannel channel =
-            new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, handlingSettings, threadPool.getThreadContext());
+            new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext());
         final TestResponse response = new TestResponse(bigArrays);
         assertThat(response.content(), instanceOf(Releasable.class));
         embeddedChannel.close();

@@ -263,10 +261,9 @@ public class Netty4HttpChannelTests extends ESTestCase {
         final FullHttpRequest httpRequest = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
         final EmbeddedChannel embeddedChannel = new EmbeddedChannel();
         final Netty4HttpRequest request = new Netty4HttpRequest(registry, httpRequest, embeddedChannel);
-        final HttpPipelinedRequest pipelinedRequest = randomBoolean() ? new HttpPipelinedRequest(request.request(), 1) : null;
         HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings;
         final Netty4HttpChannel channel =
-            new Netty4HttpChannel(httpServerTransport, request, pipelinedRequest, handlingSettings, threadPool.getThreadContext());
+            new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext());
         final BytesRestResponse response = new BytesRestResponse(RestStatus.INTERNAL_SERVER_ERROR,
             JsonXContent.contentBuilder().startObject().endObject());
         assertThat(response.content(), not(instanceOf(Releasable.class)));

@@ -312,7 +309,7 @@ public class Netty4HttpChannelTests extends ESTestCase {
         assertTrue(embeddedChannel.isOpen());
         HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings;
         final Netty4HttpChannel channel =
-            new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext());
+            new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext());
         final TestResponse resp = new TestResponse();
         channel.sendResponse(resp);
         assertThat(embeddedChannel.isOpen(), equalTo(!close));

@@ -340,13 +337,13 @@ public class Netty4HttpChannelTests extends ESTestCase {
         HttpHandlingSettings handlingSettings = httpServerTransport.httpHandlingSettings;

         Netty4HttpChannel channel =
-            new Netty4HttpChannel(httpServerTransport, request, null, handlingSettings, threadPool.getThreadContext());
+            new Netty4HttpChannel(httpServerTransport, request, 1, handlingSettings, threadPool.getThreadContext());
         channel.sendResponse(new TestResponse());

         // get the response
         List<Object> writtenObjects = writeCapturingChannel.getWrittenObjects();
         assertThat(writtenObjects.size(), is(1));
-        return (FullHttpResponse) writtenObjects.get(0);
+        return ((Netty4HttpResponse) writtenObjects.get(0)).getResponse();
     }
 }
@@ -17,7 +17,7 @@
  * under the License.
  */

-package org.elasticsearch.http.netty4.pipelining;
+package org.elasticsearch.http.netty4;

 import io.netty.buffer.ByteBuf;
 import io.netty.buffer.ByteBufUtil;

@@ -37,6 +37,7 @@ import io.netty.handler.codec.http.HttpVersion;
 import io.netty.handler.codec.http.LastHttpContent;
 import io.netty.handler.codec.http.QueryStringDecoder;
 import org.elasticsearch.common.Randomness;
+import org.elasticsearch.http.HttpPipelinedRequest;
 import org.elasticsearch.test.ESTestCase;
 import org.junit.After;

@@ -62,7 +63,8 @@ import static org.hamcrest.core.Is.is;

 public class Netty4HttpPipeliningHandlerTests extends ESTestCase {

-    private final ExecutorService executorService = Executors.newFixedThreadPool(randomIntBetween(4, 8));
+    private final ExecutorService handlerService = Executors.newFixedThreadPool(randomIntBetween(4, 8));
+    private final ExecutorService eventLoopService = Executors.newFixedThreadPool(1);
     private final Map<String, CountDownLatch> waitingRequests = new ConcurrentHashMap<>();
     private final Map<String, CountDownLatch> finishingRequests = new ConcurrentHashMap<>();

@@ -79,15 +81,19 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {
     }

     private void shutdownExecutorService() throws InterruptedException {
-        if (!executorService.isShutdown()) {
-            executorService.shutdown();
-            executorService.awaitTermination(10, TimeUnit.SECONDS);
+        if (!handlerService.isShutdown()) {
+            handlerService.shutdown();
+            handlerService.awaitTermination(10, TimeUnit.SECONDS);
+        }
+        if (!eventLoopService.isShutdown()) {
+            eventLoopService.shutdown();
+            eventLoopService.awaitTermination(10, TimeUnit.SECONDS);
         }
     }

     public void testThatPipeliningWorksWithFastSerializedRequests() throws InterruptedException {
         final int numberOfRequests = randomIntBetween(2, 128);
-        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new HttpPipeliningHandler(logger, numberOfRequests),
+        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new Netty4HttpPipeliningHandler(logger, numberOfRequests),
             new WorkEmulatorHandler());

         for (int i = 0; i < numberOfRequests; i++) {

@@ -114,7 +120,7 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {

     public void testThatPipeliningWorksWhenSlowRequestsInDifferentOrder() throws InterruptedException {
         final int numberOfRequests = randomIntBetween(2, 128);
-        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new HttpPipeliningHandler(logger, numberOfRequests),
+        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new Netty4HttpPipeliningHandler(logger, numberOfRequests),
             new WorkEmulatorHandler());

         for (int i = 0; i < numberOfRequests; i++) {

@@ -147,7 +153,7 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {
         final EmbeddedChannel embeddedChannel =
             new EmbeddedChannel(
                 new AggregateUrisAndHeadersHandler(),
-                new HttpPipeliningHandler(logger, numberOfRequests),
+                new Netty4HttpPipeliningHandler(logger, numberOfRequests),
                 new WorkEmulatorHandler());

         for (int i = 0; i < numberOfRequests; i++) {

@@ -176,7 +182,7 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {

     public void testThatPipeliningClosesConnectionWithTooManyEvents() throws InterruptedException {
         final int numberOfRequests = randomIntBetween(2, 128);
-        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new HttpPipeliningHandler(logger, numberOfRequests),
+        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new Netty4HttpPipeliningHandler(logger, numberOfRequests),
             new WorkEmulatorHandler());

         for (int i = 0; i < 1 + numberOfRequests + 1; i++) {

@@ -184,7 +190,7 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {
         }

         final List<CountDownLatch> latches = new ArrayList<>();
-        final List<Integer> requests = IntStream.range(1, numberOfRequests + 1).mapToObj(r -> r).collect(Collectors.toList());
+        final List<Integer> requests = IntStream.range(1, numberOfRequests + 1).boxed().collect(Collectors.toList());
         Randomness.shuffle(requests);

         for (final Integer request : requests) {

@@ -205,25 +211,26 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {
     public void testPipeliningRequestsAreReleased() throws InterruptedException {
         final int numberOfRequests = 10;
         final EmbeddedChannel embeddedChannel =
-            new EmbeddedChannel(new HttpPipeliningHandler(logger, numberOfRequests + 1));
+            new EmbeddedChannel(new Netty4HttpPipeliningHandler(logger, numberOfRequests + 1));

         for (int i = 0; i < numberOfRequests; i++) {
             embeddedChannel.writeInbound(createHttpRequest("/" + i));
         }

-        HttpPipelinedRequest inbound;
-        ArrayList<HttpPipelinedRequest> requests = new ArrayList<>();
+        HttpPipelinedRequest<FullHttpRequest> inbound;
+        ArrayList<HttpPipelinedRequest<FullHttpRequest>> requests = new ArrayList<>();
         while ((inbound = embeddedChannel.readInbound()) != null) {
             requests.add(inbound);
         }

         ArrayList<ChannelPromise> promises = new ArrayList<>();
         for (int i = 1; i < requests.size(); ++i) {
-            final DefaultFullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK);
+            final FullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK);
             ChannelPromise promise = embeddedChannel.newPromise();
             promises.add(promise);
-            HttpPipelinedResponse response = requests.get(i).createHttpResponse(httpResponse, promise);
-            embeddedChannel.writeAndFlush(response, promise);
+            int sequence = requests.get(i).getSequence();
+            Netty4HttpResponse resp = new Netty4HttpResponse(sequence, httpResponse);
+            embeddedChannel.writeAndFlush(resp, promise);
         }

         for (ChannelPromise promise : promises) {

@@ -260,14 +267,14 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {

     }

-    private class WorkEmulatorHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest> {
+    private class WorkEmulatorHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<LastHttpContent>> {

         @Override
-        protected void channelRead0(final ChannelHandlerContext ctx, final HttpPipelinedRequest pipelinedRequest) throws Exception {
+        protected void channelRead0(final ChannelHandlerContext ctx, HttpPipelinedRequest<LastHttpContent> pipelinedRequest) {
+            LastHttpContent request = pipelinedRequest.getRequest();
             final QueryStringDecoder decoder;
-            if (pipelinedRequest.last() instanceof FullHttpRequest) {
-                final FullHttpRequest fullHttpRequest = (FullHttpRequest) pipelinedRequest.last();
-                decoder = new QueryStringDecoder(fullHttpRequest.uri());
+            if (request instanceof FullHttpRequest) {
+                decoder = new QueryStringDecoder(((FullHttpRequest) request).uri());
             } else {
                 decoder = new QueryStringDecoder(AggregateUrisAndHeadersHandler.QUEUE_URI.poll());
             }

@@ -282,12 +289,14 @@ public class Netty4HttpPipeliningHandlerTests extends ESTestCase {
             final CountDownLatch finishingLatch = new CountDownLatch(1);
             finishingRequests.put(uri, finishingLatch);

-            executorService.submit(() -> {
+            handlerService.submit(() -> {
                 try {
                     waitingLatch.await(1000, TimeUnit.SECONDS);
                     final ChannelPromise promise = ctx.newPromise();
-                    ctx.write(pipelinedRequest.createHttpResponse(httpResponse, promise), promise);
-                    finishingLatch.countDown();
+                    eventLoopService.submit(() -> {
+                        ctx.write(new Netty4HttpResponse(pipelinedRequest.getSequence(), httpResponse), promise);
+                        finishingLatch.countDown();
+                    });
                 } catch (InterruptedException e) {
                     fail(e.toString());
                 }
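The test refactoring above splits the old single `executorService` into two pools: `handlerService` simulates request processing that may finish in any order, while the single-threaded `eventLoopService` serializes all writes, mimicking a Netty event loop so the pipelining handler sees one write at a time, possibly out of sequence order. A condensed sketch of that threading model:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class TwoExecutorSketch {
    public static void main(String[] args) {
        ExecutorService handlers = Executors.newFixedThreadPool(4);  // simulated request work, any order
        ExecutorService eventLoop = Executors.newFixedThreadPool(1); // serialized writes, like an event loop
        for (int i = 0; i < 4; i++) {
            final int seq = i;
            // work finishes on a handler thread, but the write is funneled through one thread
            handlers.submit(() -> eventLoop.submit(() -> System.out.println("write " + seq)));
        }
        handlers.shutdown();
        eventLoop.shutdown();
    }
}
```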
@ -38,9 +38,9 @@ import org.elasticsearch.common.transport.TransportAddress;
|
|||
import org.elasticsearch.common.util.MockBigArrays;
|
||||
import org.elasticsearch.common.util.MockPageCacheRecycler;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
import org.elasticsearch.http.HttpPipelinedRequest;
|
||||
import org.elasticsearch.http.HttpServerTransport;
|
||||
import org.elasticsearch.http.NullDispatcher;
|
||||
-import org.elasticsearch.http.netty4.pipelining.HttpPipelinedRequest;
 import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.threadpool.TestThreadPool;
@@ -52,16 +52,11 @@ import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.Collections;
-import java.util.HashSet;
 import java.util.List;
-import java.util.Set;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;

-import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
-import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.Matchers.contains;
 import static org.hamcrest.Matchers.hasSize;

 /**
  * This test just tests, if he pipelining works in general with out any connection the Elasticsearch handler
@@ -85,9 +80,8 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase {
         }
     }

-    public void testThatHttpPipeliningWorksWhenEnabled() throws Exception {
+    public void testThatHttpPipeliningWorks() throws Exception {
         final Settings settings = Settings.builder()
-            .put("http.pipelining", true)
             .put("http.port", "0")
             .build();
         try (HttpServerTransport httpServerTransport = new CustomNettyHttpServerTransport(settings)) {
@@ -112,48 +106,6 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase {
         }
     }

-    public void testThatHttpPipeliningCanBeDisabled() throws Exception {
-        final Settings settings = Settings.builder()
-            .put("http.pipelining", false)
-            .put("http.port", "0")
-            .build();
-        try (HttpServerTransport httpServerTransport = new CustomNettyHttpServerTransport(settings)) {
-            httpServerTransport.start();
-            final TransportAddress transportAddress = randomFrom(httpServerTransport.boundAddress().boundAddresses());
-
-            final int numberOfRequests = randomIntBetween(4, 16);
-            final Set<Integer> slowIds = new HashSet<>();
-            final List<String> requests = new ArrayList<>(numberOfRequests);
-            for (int i = 0; i < numberOfRequests; i++) {
-                if (rarely()) {
-                    requests.add("/slow/" + i);
-                    slowIds.add(i);
-                } else {
-                    requests.add("/" + i);
-                }
-            }
-
-            try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) {
-                Collection<FullHttpResponse> responses = nettyHttpClient.get(transportAddress.address(), requests.toArray(new String[]{}));
-                List<String> responseBodies = new ArrayList<>(Netty4HttpClient.returnHttpResponseBodies(responses));
-                // we can not be sure about the order of the responses, but the slow ones should come last
-                assertThat(responseBodies, hasSize(numberOfRequests));
-                for (int i = 0; i < numberOfRequests - slowIds.size(); i++) {
-                    assertThat(responseBodies.get(i), matches("/\\d+"));
-                }
-
-                final Set<Integer> ids = new HashSet<>();
-                for (int i = 0; i < slowIds.size(); i++) {
-                    final String response = responseBodies.get(numberOfRequests - slowIds.size() + i);
-                    assertThat(response, matches("/slow/\\d+" ));
-                    assertTrue(ids.add(Integer.parseInt(response.split("/")[2])));
-                }
-
-                assertThat(slowIds, equalTo(ids));
-            }
-        }
-    }
-
     class CustomNettyHttpServerTransport extends Netty4HttpServerTransport {

         private final ExecutorService executorService = Executors.newCachedThreadPool();
@@ -196,7 +148,7 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase {

     }

-    class PossiblySlowUpstreamHandler extends SimpleChannelInboundHandler<Object> {
+    class PossiblySlowUpstreamHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<FullHttpRequest>> {

         private final ExecutorService executorService;

@@ -205,7 +157,7 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase {
         }

         @Override
-        protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception {
+        protected void channelRead0(ChannelHandlerContext ctx, HttpPipelinedRequest<FullHttpRequest> msg) throws Exception {
             executorService.submit(new PossiblySlowRunnable(ctx, msg));
         }

@@ -220,26 +172,18 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase {
    class PossiblySlowRunnable implements Runnable {

        private ChannelHandlerContext ctx;
-       private HttpPipelinedRequest pipelinedRequest;
+       private HttpPipelinedRequest<FullHttpRequest> pipelinedRequest;
        private FullHttpRequest fullHttpRequest;

-       PossiblySlowRunnable(ChannelHandlerContext ctx, Object msg) {
+       PossiblySlowRunnable(ChannelHandlerContext ctx, HttpPipelinedRequest<FullHttpRequest> msg) {
            this.ctx = ctx;
-           if (msg instanceof HttpPipelinedRequest) {
-               this.pipelinedRequest = (HttpPipelinedRequest) msg;
-           } else if (msg instanceof FullHttpRequest) {
-               this.fullHttpRequest = (FullHttpRequest) msg;
-           }
+           this.pipelinedRequest = msg;
+           this.fullHttpRequest = pipelinedRequest.getRequest();
        }

        @Override
        public void run() {
-           final String uri;
-           if (pipelinedRequest != null && pipelinedRequest.last() instanceof FullHttpRequest) {
-               uri = ((FullHttpRequest) pipelinedRequest.last()).uri();
-           } else {
-               uri = fullHttpRequest.uri();
-           }
+           final String uri = fullHttpRequest.uri();

            final ByteBuf buffer = Unpooled.copiedBuffer(uri, StandardCharsets.UTF_8);

@@ -258,13 +202,7 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase {
            }

            final ChannelPromise promise = ctx.newPromise();
-           final Object msg;
-           if (pipelinedRequest != null) {
-               msg = pipelinedRequest.createHttpResponse(httpResponse, promise);
-           } else {
-               msg = httpResponse;
-           }
-           ctx.writeAndFlush(msg, promise);
+           ctx.writeAndFlush(new Netty4HttpResponse(pipelinedRequest.getSequence(), httpResponse), promise);
        }

    }
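Editor's note: the renamed test above drives several requests down one connection and asserts that the responses come back in request order. For readers new to HTTP pipelining, here is a bare-bones client sketch, not from this commit; the host, port, and paths are assumptions chosen for illustration:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.Socket;
import java.nio.charset.StandardCharsets;

// Bare-bones illustration of HTTP pipelining: both requests are written before
// any response is read, and HTTP/1.1 requires the server to answer them in the
// order they were sent.
public class PipeliningClientSketch {
    public static void main(String[] args) throws Exception {
        try (Socket socket = new Socket("localhost", 9200)) { // assumes a local HTTP server
            OutputStream out = socket.getOutputStream();
            String requests =
                "GET /0 HTTP/1.1\r\nHost: localhost\r\n\r\n" +
                "GET /1 HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n";
            out.write(requests.getBytes(StandardCharsets.US_ASCII));
            out.flush();

            BufferedReader in = new BufferedReader(
                new InputStreamReader(socket.getInputStream(), StandardCharsets.US_ASCII));
            String line;
            while ((line = in.readLine()) != null) {
                System.out.println(line); // responses arrive in request order: /0 then /1
            }
        }
    }
}

Because HTTP/1.1 has no way to correlate interleaved responses, ordering is the whole contract; that is why a server that finishes request 1 before request 0 must queue the early response rather than write it immediately.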
@@ -21,8 +21,6 @@ package org.elasticsearch.http.netty4;

 import io.netty.handler.codec.http.FullHttpResponse;
 import org.elasticsearch.ESNetty4IntegTestCase;
-import org.elasticsearch.common.network.NetworkModule;
-import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.http.HttpServerTransport;
 import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
@@ -35,21 +33,13 @@ import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.is;

 @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1)
-public class Netty4PipeliningEnabledIT extends ESNetty4IntegTestCase {
+public class Netty4PipeliningIT extends ESNetty4IntegTestCase {

     @Override
     protected boolean addMockHttpTransport() {
         return false; // enable http
     }

-    @Override
-    protected Settings nodeSettings(int nodeOrdinal) {
-        return Settings.builder()
-            .put(super.nodeSettings(nodeOrdinal))
-            .put("http.pipelining", true)
-            .build();
-    }
-
     public void testThatNettyHttpServerSupportsPipelining() throws Exception {
         String[] requests = new String[]{"/", "/_nodes/stats", "/", "/_cluster/state", "/"};
@@ -1 +0,0 @@
-473a7f4d955f132bb498482648266653f8da85bd
@@ -0,0 +1 @@
+452c9a9f86b79b9b3eaa7d6aa782e189d5bcfe8f
@@ -1 +0,0 @@
-c5a72b9a790e2552248c8bbb36af47c4c399ba27
@@ -0,0 +1 @@
+48c76a922bdfc7f50b1b6fe22e9456c555f3f990
@@ -1 +0,0 @@
-14f680ab9b886c7c5224ff682a7fa70b6df44a05
@@ -0,0 +1 @@
+4db5777df468b0867ff6539c9ab687e0ed6cab41
@@ -1 +0,0 @@
-e033c68c9ec1ba9cd8439758adf7eb5fee22acef
@@ -0,0 +1 @@
+0e09e6b011ab2b1a0e3e0e1df2ab2a91dca8ba23
@@ -1 +0,0 @@
-08df0a5029f11c109b22064dec78c05dfa25f9e3
@@ -0,0 +1 @@
+ceefa0f9789ab9ea5c8ab9f67ed7a601a3ae6aa9
@@ -1 +0,0 @@
-a9d1819b2b13f134f6a605ab5a59ce3c602c0460
@@ -0,0 +1 @@
+b013adc183e52a74795ad3d3032f4d0f9db30b73
@@ -1 +0,0 @@
-47bc91ccb0cdf0c1c404646ffe0d5fd6b020a4ab
@@ -0,0 +1 @@
+95300f29418f60e57e022d934d3462be9e1e2225
@@ -25,20 +25,21 @@ import io.netty.handler.codec.ByteToMessageDecoder;
 import io.netty.handler.codec.http.DefaultFullHttpRequest;
 import io.netty.handler.codec.http.DefaultHttpHeaders;
 import io.netty.handler.codec.http.FullHttpRequest;
 import io.netty.handler.codec.http.FullHttpResponse;
 import io.netty.handler.codec.http.HttpContentCompressor;
 import io.netty.handler.codec.http.HttpContentDecompressor;
 import io.netty.handler.codec.http.HttpHeaders;
 import io.netty.handler.codec.http.HttpObjectAggregator;
 import io.netty.handler.codec.http.HttpRequestDecoder;
 import io.netty.handler.codec.http.HttpResponseEncoder;
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.http.HttpHandlingSettings;
+import org.elasticsearch.http.HttpPipelinedRequest;
 import org.elasticsearch.nio.FlushOperation;
 import org.elasticsearch.nio.InboundChannelBuffer;
-import org.elasticsearch.nio.ReadWriteHandler;
 import org.elasticsearch.nio.NioSocketChannel;
+import org.elasticsearch.nio.ReadWriteHandler;
 import org.elasticsearch.nio.SocketChannelContext;
 import org.elasticsearch.nio.WriteOperation;
 import org.elasticsearch.rest.RestRequest;
@@ -77,6 +78,7 @@ public class HttpReadWriteHandler implements ReadWriteHandler {
         if (settings.isCompression()) {
             handlers.add(new HttpContentCompressor(settings.getCompressionLevel()));
         }
+        handlers.add(new NioHttpPipeliningHandler(transport.getLogger(), settings.getPipeliningMaxEvents()));

         adaptor = new NettyAdaptor(handlers.toArray(new ChannelHandler[0]));
         adaptor.addCloseListener((v, e) -> nioChannel.close());
@@ -95,9 +97,9 @@ public class HttpReadWriteHandler implements ReadWriteHandler {

     @Override
     public WriteOperation createWriteOperation(SocketChannelContext context, Object message, BiConsumer<Void, Throwable> listener) {
-        assert message instanceof FullHttpResponse : "This channel only supports messages that are of type: " + FullHttpResponse.class
-            + ". Found type: " + message.getClass() + ".";
-        return new HttpWriteOperation(context, (FullHttpResponse) message, listener);
+        assert message instanceof NioHttpResponse : "This channel only supports messages that are of type: "
+            + NioHttpResponse.class + ". Found type: " + message.getClass() + ".";
+        return new HttpWriteOperation(context, (NioHttpResponse) message, listener);
     }

     @Override
@@ -125,76 +127,85 @@ public class HttpReadWriteHandler implements ReadWriteHandler {
         }
     }

+    @SuppressWarnings("unchecked")
     private void handleRequest(Object msg) {
-        final FullHttpRequest request = (FullHttpRequest) msg;
+        final HttpPipelinedRequest<FullHttpRequest> pipelinedRequest = (HttpPipelinedRequest<FullHttpRequest>) msg;
+        FullHttpRequest request = pipelinedRequest.getRequest();

-        final FullHttpRequest copiedRequest =
-            new DefaultFullHttpRequest(
-                request.protocolVersion(),
-                request.method(),
-                request.uri(),
-                Unpooled.copiedBuffer(request.content()),
-                request.headers(),
-                request.trailingHeaders());
-
-        Exception badRequestCause = null;
-
-        /*
-         * We want to create a REST request from the incoming request from Netty. However, creating this request could fail if there
-         * are incorrectly encoded parameters, or the Content-Type header is invalid. If one of these specific failures occurs, we
-         * attempt to create a REST request again without the input that caused the exception (e.g., we remove the Content-Type header,
-         * or skip decoding the parameters). Once we have a request in hand, we then dispatch the request as a bad request with the
-         * underlying exception that caused us to treat the request as bad.
-         */
-        final NioHttpRequest httpRequest;
-        {
-            NioHttpRequest innerHttpRequest;
-            try {
-                innerHttpRequest = new NioHttpRequest(xContentRegistry, copiedRequest);
-            } catch (final RestRequest.ContentTypeHeaderException e) {
-                badRequestCause = e;
-                innerHttpRequest = requestWithoutContentTypeHeader(copiedRequest, badRequestCause);
-            } catch (final RestRequest.BadParameterException e) {
-                badRequestCause = e;
-                innerHttpRequest = requestWithoutParameters(copiedRequest);
-            }
-            httpRequest = innerHttpRequest;
-        }
-
-        /*
-         * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid
-         * parameter values for any of the filter_path, human, or pretty parameters. We detect these specific failures via an
-         * IllegalArgumentException from the channel constructor and then attempt to create a new channel that bypasses parsing of these
-         * parameter values.
-         */
-        final NioHttpChannel channel;
-        {
-            NioHttpChannel innerChannel;
-            try {
-                innerChannel = new NioHttpChannel(nioChannel, transport.getBigArrays(), httpRequest, settings, threadContext);
-            } catch (final IllegalArgumentException e) {
-                if (badRequestCause == null) {
-                    badRequestCause = e;
-                } else {
-                    badRequestCause.addSuppressed(e);
-                }
-                final NioHttpRequest innerRequest =
-                    new NioHttpRequest(
-                        xContentRegistry,
-                        Collections.emptyMap(), // we are going to dispatch the request as a bad request, drop all parameters
-                        copiedRequest.uri(),
-                        copiedRequest);
-                innerChannel = new NioHttpChannel(nioChannel, transport.getBigArrays(), innerRequest, settings, threadContext);
-            }
-            channel = innerChannel;
-        }
-
-        if (request.decoderResult().isFailure()) {
-            transport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause());
-        } else if (badRequestCause != null) {
-            transport.dispatchBadRequest(httpRequest, channel, badRequestCause);
-        } else {
-            transport.dispatchRequest(httpRequest, channel);
-        }
+        try {
+            final FullHttpRequest copiedRequest =
+                new DefaultFullHttpRequest(
+                    request.protocolVersion(),
+                    request.method(),
+                    request.uri(),
+                    Unpooled.copiedBuffer(request.content()),
+                    request.headers(),
+                    request.trailingHeaders());
+
+            Exception badRequestCause = null;
+
+            /*
+             * We want to create a REST request from the incoming request from Netty. However, creating this request could fail if there
+             * are incorrectly encoded parameters, or the Content-Type header is invalid. If one of these specific failures occurs, we
+             * attempt to create a REST request again without the input that caused the exception (e.g., we remove the Content-Type header,
+             * or skip decoding the parameters). Once we have a request in hand, we then dispatch the request as a bad request with the
+             * underlying exception that caused us to treat the request as bad.
+             */
+            final NioHttpRequest httpRequest;
+            {
+                NioHttpRequest innerHttpRequest;
+                try {
+                    innerHttpRequest = new NioHttpRequest(xContentRegistry, copiedRequest);
+                } catch (final RestRequest.ContentTypeHeaderException e) {
+                    badRequestCause = e;
+                    innerHttpRequest = requestWithoutContentTypeHeader(copiedRequest, badRequestCause);
+                } catch (final RestRequest.BadParameterException e) {
+                    badRequestCause = e;
+                    innerHttpRequest = requestWithoutParameters(copiedRequest);
+                }
+                httpRequest = innerHttpRequest;
+            }
+
+            /*
+             * We now want to create a channel used to send the response on. However, creating this channel can fail if there are invalid
+             * parameter values for any of the filter_path, human, or pretty parameters. We detect these specific failures via an
+             * IllegalArgumentException from the channel constructor and then attempt to create a new channel that bypasses parsing of
+             * these parameter values.
+             */
+            final NioHttpChannel channel;
+            {
+                NioHttpChannel innerChannel;
+                int sequence = pipelinedRequest.getSequence();
+                BigArrays bigArrays = transport.getBigArrays();
+                try {
+                    innerChannel = new NioHttpChannel(nioChannel, bigArrays, httpRequest, sequence, settings, threadContext);
+                } catch (final IllegalArgumentException e) {
+                    if (badRequestCause == null) {
+                        badRequestCause = e;
+                    } else {
+                        badRequestCause.addSuppressed(e);
+                    }
+                    final NioHttpRequest innerRequest =
+                        new NioHttpRequest(
+                            xContentRegistry,
+                            Collections.emptyMap(), // we are going to dispatch the request as a bad request, drop all parameters
+                            copiedRequest.uri(),
+                            copiedRequest);
+                    innerChannel = new NioHttpChannel(nioChannel, bigArrays, innerRequest, sequence, settings, threadContext);
+                }
+                channel = innerChannel;
+            }
+
+            if (request.decoderResult().isFailure()) {
+                transport.dispatchBadRequest(httpRequest, channel, request.decoderResult().cause());
+            } else if (badRequestCause != null) {
+                transport.dispatchBadRequest(httpRequest, channel, badRequestCause);
+            } else {
+                transport.dispatchRequest(httpRequest, channel);
+            }
+        } finally {
+            // As we have copied the buffer, we can release the request
            request.release();
+        }
    }
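Editor's note: besides threading the pipelining sequence number through to the channel, the rewritten handleRequest wraps the whole dispatch in try/finally so the reference-counted Netty request is always released once its content has been copied. A minimal sketch of that copy-then-release discipline, using only standard Netty buffer APIs and invented for illustration:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import io.netty.util.ReferenceCountUtil;
import java.nio.charset.StandardCharsets;

// Sketch of the copy-then-release pattern: after copying the inbound buffer,
// the original is released in a finally block regardless of which code path ran.
public class ReleaseDisciplineSketch {
    public static void main(String[] args) {
        ByteBuf inbound = Unpooled.copiedBuffer("body", StandardCharsets.UTF_8);
        try {
            ByteBuf copy = inbound.copy(); // independent lifetime from the original
            try {
                System.out.println(copy.toString(StandardCharsets.UTF_8));
            } finally {
                copy.release();
            }
        } finally {
            ReferenceCountUtil.release(inbound); // refCnt drops to zero exactly once
        }
    }
}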
@@ -19,7 +19,6 @@

 package org.elasticsearch.http.nio;

-import io.netty.handler.codec.http.FullHttpResponse;
 import org.elasticsearch.nio.SocketChannelContext;
 import org.elasticsearch.nio.WriteOperation;

@@ -28,10 +27,10 @@ import java.util.function.BiConsumer;
 public class HttpWriteOperation implements WriteOperation {

     private final SocketChannelContext channelContext;
-    private final FullHttpResponse response;
+    private final NioHttpResponse response;
     private final BiConsumer<Void, Throwable> listener;

-    HttpWriteOperation(SocketChannelContext channelContext, FullHttpResponse response, BiConsumer<Void, Throwable> listener) {
+    HttpWriteOperation(SocketChannelContext channelContext, NioHttpResponse response, BiConsumer<Void, Throwable> listener) {
         this.channelContext = channelContext;
         this.response = response;
         this.listener = listener;
@@ -48,7 +47,7 @@ public class HttpWriteOperation implements WriteOperation {
     }

     @Override
-    public FullHttpResponse getObject() {
+    public NioHttpResponse getObject() {
         return response;
     }
 }
@@ -53,12 +53,7 @@ public class NettyAdaptor implements AutoCloseable {
             try {
                 ByteBuf message = (ByteBuf) msg;
                 promise.addListener((f) -> message.release());
-                NettyListener listener;
-                if (promise instanceof NettyListener) {
-                    listener = (NettyListener) promise;
-                } else {
-                    listener = new NettyListener(promise);
-                }
+                NettyListener listener = NettyListener.fromChannelPromise(promise);
                 flushOperations.add(new FlushOperation(message.nioBuffers(), listener));
             } catch (Exception e) {
                 promise.setFailure(e);
@@ -107,18 +102,7 @@ public class NettyAdaptor implements AutoCloseable {
     }

     public void write(WriteOperation writeOperation) {
-        ChannelPromise channelPromise = nettyChannel.newPromise();
-        channelPromise.addListener(f -> {
-            BiConsumer<Void, Throwable> consumer = writeOperation.getListener();
-            if (f.cause() == null) {
-                consumer.accept(null, null);
-            } else {
-                ExceptionsHelper.dieOnError(f.cause());
-                consumer.accept(null, f.cause());
-            }
-        });
-
-        nettyChannel.writeAndFlush(writeOperation.getObject(), new NettyListener(channelPromise));
+        nettyChannel.writeAndFlush(writeOperation.getObject(), NettyListener.fromBiConsumer(writeOperation.getListener(), nettyChannel));
     }

     public FlushOperation pollOutboundOperation() {
@@ -23,7 +23,7 @@ import io.netty.channel.Channel;
 import io.netty.channel.ChannelPromise;
 import io.netty.util.concurrent.Future;
 import io.netty.util.concurrent.GenericFutureListener;
-import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.common.util.concurrent.FutureUtils;

 import java.util.concurrent.ExecutionException;
@@ -40,7 +40,7 @@ public class NettyListener implements BiConsumer<Void, Throwable>, ChannelPromis

     private final ChannelPromise promise;

-    NettyListener(ChannelPromise promise) {
+    private NettyListener(ChannelPromise promise) {
         this.promise = promise;
     }

@@ -211,4 +211,30 @@ public class NettyListener implements BiConsumer<Void, Throwable>, ChannelPromis
     public ChannelPromise unvoid() {
         return promise.unvoid();
     }
+
+    public static NettyListener fromBiConsumer(BiConsumer<Void, Throwable> biConsumer, Channel channel) {
+        if (biConsumer instanceof NettyListener) {
+            return (NettyListener) biConsumer;
+        } else {
+            ChannelPromise channelPromise = channel.newPromise();
+            channelPromise.addListener(f -> {
+                if (f.cause() == null) {
+                    biConsumer.accept(null, null);
+                } else {
+                    ExceptionsHelper.dieOnError(f.cause());
+                    biConsumer.accept(null, f.cause());
+                }
+            });
+
+            return new NettyListener(channelPromise);
+        }
+    }
+
+    public static NettyListener fromChannelPromise(ChannelPromise channelPromise) {
+        if (channelPromise instanceof NettyListener) {
+            return (NettyListener) channelPromise;
+        } else {
+            return new NettyListener(channelPromise);
+        }
+    }
 }
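Editor's note: the two new factories centralize a wrap-or-cast idiom that the call sites in NettyAdaptor previously spelled out by hand: reuse the object when it already is a NettyListener, otherwise adapt it exactly once. A standalone sketch of the same idiom using CompletableFuture instead of Netty types; every name here is hypothetical:

import java.util.concurrent.CompletableFuture;
import java.util.function.BiConsumer;

// Wrap-or-cast adapter: a BiConsumer<Void,Throwable> callback is bridged onto a
// CompletableFuture, but an already-adapted instance is returned unchanged so
// listeners are never double-wrapped.
public class FutureListener implements BiConsumer<Void, Throwable> {

    final CompletableFuture<Void> future;

    private FutureListener(CompletableFuture<Void> future) { // only the factory constructs
        this.future = future;
    }

    static FutureListener from(BiConsumer<Void, Throwable> consumer) {
        if (consumer instanceof FutureListener) {
            return (FutureListener) consumer; // already one of ours: no double wrap
        }
        CompletableFuture<Void> future = new CompletableFuture<>();
        future.whenComplete(consumer); // propagate completion to the foreign callback
        return new FutureListener(future);
    }

    @Override
    public void accept(Void v, Throwable t) {
        if (t == null) {
            future.complete(null);
        } else {
            future.completeExceptionally(t);
        }
    }

    public static void main(String[] args) {
        FutureListener listener = FutureListener.from((v, t) -> System.out.println(t == null ? "ok" : "failed: " + t));
        listener.accept(null, null);                    // prints "ok"
        System.out.println(FutureListener.from(listener) == listener); // true: same instance reused
    }
}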
@@ -52,20 +52,23 @@ import java.util.EnumMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.function.BiConsumer;

 public class NioHttpChannel extends AbstractRestChannel {

     private final BigArrays bigArrays;
+    private final int sequence;
     private final ThreadContext threadContext;
     private final FullHttpRequest nettyRequest;
     private final NioSocketChannel nioChannel;
     private final boolean resetCookies;

-    NioHttpChannel(NioSocketChannel nioChannel, BigArrays bigArrays, NioHttpRequest request,
+    NioHttpChannel(NioSocketChannel nioChannel, BigArrays bigArrays, NioHttpRequest request, int sequence,
                    HttpHandlingSettings settings, ThreadContext threadContext) {
         super(request, settings.getDetailedErrorsEnabled());
         this.nioChannel = nioChannel;
         this.bigArrays = bigArrays;
+        this.sequence = sequence;
         this.threadContext = threadContext;
         this.nettyRequest = request.getRequest();
         this.resetCookies = settings.isResetCookies();
@@ -117,9 +120,8 @@ public class NioHttpChannel extends AbstractRestChannel {
                 toClose.add(nioChannel::close);
             }

-            nioChannel.getContext().sendMessage(resp, (aVoid, throwable) -> {
-                Releasables.close(toClose);
-            });
+            BiConsumer<Void, Throwable> listener = (aVoid, throwable) -> Releasables.close(toClose);
+            nioChannel.getContext().sendMessage(new NioHttpResponse(sequence, resp), listener);
             success = true;
         } finally {
             if (success == false) {
@@ -0,0 +1,103 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http.nio;
+
+import io.netty.channel.ChannelDuplexHandler;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelPromise;
+import io.netty.handler.codec.http.LastHttpContent;
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.http.HttpPipelinedRequest;
+import org.elasticsearch.http.HttpPipeliningAggregator;
+import org.elasticsearch.http.nio.NettyListener;
+import org.elasticsearch.http.nio.NioHttpResponse;
+
+import java.nio.channels.ClosedChannelException;
+import java.util.List;
+
+/**
+ * Implements HTTP pipelining ordering, ensuring that responses are completely served in the same order as their corresponding requests.
+ */
+public class NioHttpPipeliningHandler extends ChannelDuplexHandler {
+
+    private final Logger logger;
+    private final HttpPipeliningAggregator<NioHttpResponse, NettyListener> aggregator;
+
+    /**
+     * Construct a new pipelining handler; this handler should be used downstream of HTTP decoding/aggregation.
+     *
+     * @param logger        for logging unexpected errors
+     * @param maxEventsHeld the maximum number of channel events that will be retained prior to aborting the channel connection; this is
+     *                      required as events cannot queue up indefinitely
+     */
+    public NioHttpPipeliningHandler(Logger logger, final int maxEventsHeld) {
+        this.logger = logger;
+        this.aggregator = new HttpPipeliningAggregator<>(maxEventsHeld);
+    }
+
+    @Override
+    public void channelRead(final ChannelHandlerContext ctx, final Object msg) {
+        if (msg instanceof LastHttpContent) {
+            HttpPipelinedRequest<LastHttpContent> pipelinedRequest = aggregator.read(((LastHttpContent) msg).retain());
+            ctx.fireChannelRead(pipelinedRequest);
+        } else {
+            ctx.fireChannelRead(msg);
+        }
+    }
+
+    @Override
+    public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) {
+        assert msg instanceof NioHttpResponse : "Message must be type: " + NioHttpResponse.class;
+        NioHttpResponse response = (NioHttpResponse) msg;
+        boolean success = false;
+        try {
+            NettyListener listener = NettyListener.fromChannelPromise(promise);
+            List<Tuple<NioHttpResponse, NettyListener>> readyResponses = aggregator.write(response, listener);
+            success = true;
+            for (Tuple<NioHttpResponse, NettyListener> responseToWrite : readyResponses) {
+                ctx.write(responseToWrite.v1().getResponse(), responseToWrite.v2());
+            }
+        } catch (IllegalStateException e) {
+            ctx.channel().close();
+        } finally {
+            if (success == false) {
+                promise.setFailure(new ClosedChannelException());
+            }
+        }
+    }
+
+    @Override
+    public void close(ChannelHandlerContext ctx, ChannelPromise promise) {
+        List<Tuple<NioHttpResponse, NettyListener>> inflightResponses = aggregator.removeAllInflightResponses();
+
+        if (inflightResponses.isEmpty() == false) {
+            ClosedChannelException closedChannelException = new ClosedChannelException();
+            for (Tuple<NioHttpResponse, NettyListener> inflightResponse : inflightResponses) {
+                try {
+                    inflightResponse.v2().setFailure(closedChannelException);
+                } catch (RuntimeException e) {
+                    logger.error("unexpected error while releasing pipelined http responses", e);
+                }
+            }
+        }
+        ctx.close(promise);
+    }
+}
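Editor's note: HttpPipeliningAggregator itself is not part of this hunk, but its contract can be inferred from the calls above: read() tags each inbound message with an increasing sequence number, and write() releases a response only when every earlier sequence has been written, failing with IllegalStateException when too many responses are held. A minimal single-threaded sketch of that ordering scheme; the names and the maxHeld policy are assumptions, not the actual class:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

// Releases responses strictly in request order, buffering any response that
// arrives ahead of its turn. Assumes single-threaded access, as on an event loop.
public class OrderingAggregator<T> {

    static final class Entry<T> {
        final int sequence;
        final T response;
        Entry(int sequence, T response) { this.sequence = sequence; this.response = response; }
    }

    private final PriorityQueue<Entry<T>> outOfOrder =
        new PriorityQueue<>(Comparator.comparingInt((Entry<T> e) -> e.sequence));
    private final int maxHeld;
    private int readSequence;   // next sequence to assign to an inbound request
    private int writeSequence;  // next sequence allowed to go out

    public OrderingAggregator(int maxHeld) {
        this.maxHeld = maxHeld;
    }

    public int read() {
        return readSequence++; // tag each inbound request with its slot
    }

    public List<Entry<T>> write(int sequence, T response) {
        if (outOfOrder.size() >= maxHeld) {
            throw new IllegalStateException("too many pipelined responses held"); // caller closes the channel
        }
        outOfOrder.add(new Entry<>(sequence, response));
        // Drain every response that is now contiguous with the write cursor.
        List<Entry<T>> ready = new ArrayList<>();
        while (!outOfOrder.isEmpty() && outOfOrder.peek().sequence == writeSequence) {
            ready.add(outOfOrder.poll());
            writeSequence++;
        }
        return ready;
    }

    public static void main(String[] args) {
        OrderingAggregator<String> agg = new OrderingAggregator<>(8);
        int s0 = agg.read(); // 0
        int s1 = agg.read(); // 1
        System.out.println(agg.write(s1, "second").size()); // 0 - parked, waiting for sequence 0
        System.out.println(agg.write(s0, "first").size());  // 2 - both drain, in order
    }
}

Under this scheme a response that finishes early for request 1 is simply parked in the priority queue until request 0's response is written, at which point both drain in order.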
@@ -0,0 +1,37 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http.nio;
+
+import io.netty.handler.codec.http.FullHttpResponse;
+import org.elasticsearch.http.HttpPipelinedMessage;
+
+public class NioHttpResponse extends HttpPipelinedMessage {
+
+    private final FullHttpResponse response;
+
+    public NioHttpResponse(int sequence, FullHttpResponse response) {
+        super(sequence);
+        this.response = response;
+    }
+
+    public FullHttpResponse getResponse() {
+        return response;
+    }
+}
@@ -20,6 +20,7 @@
 package org.elasticsearch.http.nio;

 import io.netty.handler.timeout.ReadTimeoutException;
 import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
@@ -84,6 +85,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_NO_D
 import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE;
 import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_REUSE_ADDRESS;
 import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_SEND_BUFFER_SIZE;
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS;

 public class NioHttpServerTransport extends AbstractHttpServerTransport {

@@ -124,6 +126,7 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport {
         ByteSizeValue maxChunkSize = SETTING_HTTP_MAX_CHUNK_SIZE.get(settings);
         ByteSizeValue maxHeaderSize = SETTING_HTTP_MAX_HEADER_SIZE.get(settings);
         ByteSizeValue maxInitialLineLength = SETTING_HTTP_MAX_INITIAL_LINE_LENGTH.get(settings);
+        int pipeliningMaxEvents = SETTING_PIPELINING_MAX_EVENTS.get(settings);
         this.httpHandlingSettings = new HttpHandlingSettings(Math.toIntExact(maxContentLength.getBytes()),
             Math.toIntExact(maxChunkSize.getBytes()),
             Math.toIntExact(maxHeaderSize.getBytes()),
@@ -131,7 +134,8 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport {
             SETTING_HTTP_RESET_COOKIES.get(settings),
             SETTING_HTTP_COMPRESSION.get(settings),
             SETTING_HTTP_COMPRESSION_LEVEL.get(settings),
-            SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings));
+            SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings),
+            pipeliningMaxEvents);

         this.tcpNoDelay = SETTING_HTTP_TCP_NO_DELAY.get(settings);
         this.tcpKeepAlive = SETTING_HTTP_TCP_KEEP_ALIVE.get(settings);
@@ -140,23 +144,29 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport {
         this.tcpReceiveBufferSize = Math.toIntExact(SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings).getBytes());


-        logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}]",
-            maxChunkSize, maxHeaderSize, maxInitialLineLength, maxContentLength);
+        logger.debug("using max_chunk_size[{}], max_header_size[{}], max_initial_line_length[{}], max_content_length[{}]," +
+            " pipelining_max_events[{}]",
+            maxChunkSize, maxHeaderSize, maxInitialLineLength, maxContentLength, pipeliningMaxEvents);
     }

     BigArrays getBigArrays() {
         return bigArrays;
     }

+    public Logger getLogger() {
+        return logger;
+    }
+
     @Override
     protected void doStart() {
         boolean success = false;
         try {
             int acceptorCount = NIO_HTTP_ACCEPTOR_COUNT.get(settings);
             int workerCount = NIO_HTTP_WORKER_COUNT.get(settings);
-            nioGroup = new NioGroup(logger, daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount,
-                AcceptorEventHandler::new, daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX),
-                workerCount, SocketEventHandler::new);
+            nioGroup = new NioGroup(daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount,
+                (s) -> new AcceptorEventHandler(s, this::nonChannelExceptionCaught),
+                daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), workerCount,
+                () -> new SocketEventHandler(this::nonChannelExceptionCaught));
             channelFactory = new HttpChannelFactory();
             this.boundAddress = createBoundHttpAddress();

@@ -265,6 +275,10 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport {
         }
     }

+    protected void nonChannelExceptionCaught(Exception ex) {
+        logger.warn(new ParameterizedMessage("exception caught on transport layer [thread={}]", Thread.currentThread().getName()), ex);
+    }
+
     private void closeChannels(List<NioChannel> channels) {
         List<ActionFuture<Void>> futures = new ArrayList<>(channels.size());

@@ -312,8 +326,10 @@ public class NioHttpServerTransport extends AbstractHttpServerTransport {
         @Override
         public NioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException {
             NioServerSocketChannel nioChannel = new NioServerSocketChannel(channel);
-            ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, NioHttpServerTransport.this::acceptChannel,
-                (e) -> {});
+            Consumer<Exception> exceptionHandler = (e) -> logger.error(() ->
+                new ParameterizedMessage("exception from server channel caught on transport layer [{}]", channel), e);
+            Consumer<NioSocketChannel> acceptor = NioHttpServerTransport.this::acceptChannel;
+            ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, acceptor, exceptionHandler);
             nioChannel.setContext(context);
             return nioChannel;
         }
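Editor's note: the NioGroup rewiring above injects exception consumers into the acceptor and socket event handlers, so failures that are not tied to any one channel surface through the transport's logger instead of being silently dropped (the old code passed `(e) -> {}`). A toy sketch of the injection pattern, with names invented for the example:

import java.util.function.Consumer;

// Event handlers receive their exception handler from the owning component
// rather than deciding a policy themselves.
public class EventLoopSketch {

    private final Consumer<Exception> nonChannelExceptionHandler;

    public EventLoopSketch(Consumer<Exception> nonChannelExceptionHandler) {
        this.nonChannelExceptionHandler = nonChannelExceptionHandler;
    }

    public void runTask(Runnable task) {
        try {
            task.run();
        } catch (Exception e) {
            nonChannelExceptionHandler.accept(e); // surfaced to the injected handler
        }
    }

    public static void main(String[] args) {
        EventLoopSketch loop = new EventLoopSketch(e -> System.err.println("non-channel exception: " + e));
        loop.runTask(() -> { throw new IllegalStateException("boom"); }); // logged, not swallowed
    }
}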
@@ -19,6 +19,7 @@

 package org.elasticsearch.transport.nio;

+import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -105,9 +106,10 @@ public class NioTransport extends TcpTransport {
         if (useNetworkServer) {
             acceptorCount = NioTransport.NIO_ACCEPTOR_COUNT.get(settings);
         }
-        nioGroup = new NioGroup(logger, daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount,
-            AcceptorEventHandler::new, daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX),
-            NioTransport.NIO_WORKER_COUNT.get(settings), SocketEventHandler::new);
+        nioGroup = new NioGroup(daemonThreadFactory(this.settings, TRANSPORT_ACCEPTOR_THREAD_NAME_PREFIX), acceptorCount,
+            (s) -> new AcceptorEventHandler(s, this::onNonChannelException),
+            daemonThreadFactory(this.settings, TRANSPORT_WORKER_THREAD_NAME_PREFIX), NioTransport.NIO_WORKER_COUNT.get(settings),
+            () -> new SocketEventHandler(this::onNonChannelException));

         ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default");
         clientChannelFactory = channelFactory(clientProfileSettings, true);
@@ -193,8 +195,10 @@ public class NioTransport extends TcpTransport {
         @Override
         public TcpNioServerSocketChannel createServerChannel(AcceptingSelector selector, ServerSocketChannel channel) throws IOException {
             TcpNioServerSocketChannel nioChannel = new TcpNioServerSocketChannel(profileName, channel);
-            ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, NioTransport.this::acceptChannel,
-                (e) -> {});
+            Consumer<Exception> exceptionHandler = (e) -> logger.error(() ->
+                new ParameterizedMessage("exception from server channel caught on transport layer [{}]", channel), e);
+            Consumer<NioSocketChannel> acceptor = NioTransport.this::acceptChannel;
+            ServerChannelContext context = new ServerChannelContext(nioChannel, this, selector, acceptor, exceptionHandler);
             nioChannel.setContext(context);
             return nioChannel;
         }
@@ -20,6 +20,7 @@ package org.elasticsearch;

 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.http.nio.NioHttpServerTransport;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.transport.nio.NioTransport;
@@ -43,11 +44,13 @@ public abstract class NioIntegTestCase extends ESIntegTestCase {
     @Override
     protected Settings nodeSettings(int nodeOrdinal) {
         Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal));
-        // randomize netty settings
+        // randomize nio settings
         if (randomBoolean()) {
             builder.put(NioTransport.NIO_WORKER_COUNT.getKey(), random().nextInt(3) + 1);
+            builder.put(NioHttpServerTransport.NIO_HTTP_WORKER_COUNT.getKey(), random().nextInt(3) + 1);
         }
         builder.put(NetworkModule.TRANSPORT_TYPE_KEY, NioTransportPlugin.NIO_TRANSPORT_NAME);
+        builder.put(NetworkModule.HTTP_TYPE_KEY, NioTransportPlugin.NIO_HTTP_TRANSPORT_NAME);
         return builder.build();
     }

@@ -61,11 +61,11 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_CHUN
 import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE;
 import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH;
 import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES;
+import static org.elasticsearch.http.HttpTransportSettings.SETTING_PIPELINING_MAX_EVENTS;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyZeroInteractions;

 public class HttpReadWriteHandlerTests extends ESTestCase {

@@ -91,7 +91,8 @@ public class HttpReadWriteHandlerTests extends ESTestCase {
             SETTING_HTTP_RESET_COOKIES.getDefault(settings),
             SETTING_HTTP_COMPRESSION.getDefault(settings),
             SETTING_HTTP_COMPRESSION_LEVEL.getDefault(settings),
-            SETTING_HTTP_DETAILED_ERRORS_ENABLED.getDefault(settings));
+            SETTING_HTTP_DETAILED_ERRORS_ENABLED.getDefault(settings),
+            SETTING_PIPELINING_MAX_EVENTS.getDefault(settings));
         ThreadContext threadContext = new ThreadContext(settings);
         nioSocketChannel = mock(NioSocketChannel.class);
         handler = new HttpReadWriteHandler(nioSocketChannel, transport, httpHandlingSettings, NamedXContentRegistry.EMPTY, threadContext);
@@ -148,7 +149,8 @@ public class HttpReadWriteHandlerTests extends ESTestCase {

         handler.consumeReads(toChannelBuffer(buf));

-        verifyZeroInteractions(transport);
+        verify(transport, times(0)).dispatchBadRequest(any(), any(), any());
+        verify(transport, times(0)).dispatchRequest(any(), any());

         List<FlushOperation> flushOperations = handler.pollFlushOperations();
         assertFalse(flushOperations.isEmpty());
@@ -169,9 +171,10 @@ public class HttpReadWriteHandlerTests extends ESTestCase {
         prepareHandlerForResponse(handler);

         FullHttpResponse fullHttpResponse = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
+        NioHttpResponse pipelinedResponse = new NioHttpResponse(0, fullHttpResponse);

         SocketChannelContext context = mock(SocketChannelContext.class);
-        HttpWriteOperation writeOperation = new HttpWriteOperation(context, fullHttpResponse, mock(BiConsumer.class));
+        HttpWriteOperation writeOperation = new HttpWriteOperation(context, pipelinedResponse, mock(BiConsumer.class));
         List<FlushOperation> flushOperations = handler.writeToBytes(writeOperation);

         HttpResponse response = responseDecoder.decode(Unpooled.wrappedBuffer(flushOperations.get(0).getBuffersToWrite()));
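Editor's note: verifyZeroInteractions presumably had to go because constructing the handler now touches the mocked transport (getLogger() is called while the pipelining handler is added to the pipeline), so the test asserts the narrower invariant that nothing was dispatched. A tiny Mockito illustration of the difference, using a hypothetical mock rather than the test's classes:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;

import java.util.List;

// verifyZeroInteractions fails on ANY call to the mock; verify(..., times(0))
// only forbids the specific call, tolerating incidental interactions.
public class TargetedVerifySketch {
    public static void main(String[] args) {
        @SuppressWarnings("unchecked")
        List<String> mockList = mock(List.class);
        mockList.size();                      // an incidental interaction is fine...
        verify(mockList, times(0)).add("x");  // ...as long as the forbidden call never happened
    }
}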
@@ -0,0 +1,304 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.http.nio;
+
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.ByteBufUtil;
+import io.netty.buffer.Unpooled;
+import io.netty.channel.ChannelHandlerContext;
+import io.netty.channel.ChannelPromise;
+import io.netty.channel.SimpleChannelInboundHandler;
+import io.netty.channel.embedded.EmbeddedChannel;
+import io.netty.handler.codec.http.DefaultFullHttpRequest;
+import io.netty.handler.codec.http.DefaultFullHttpResponse;
+import io.netty.handler.codec.http.DefaultHttpRequest;
+import io.netty.handler.codec.http.FullHttpRequest;
+import io.netty.handler.codec.http.FullHttpResponse;
+import io.netty.handler.codec.http.HttpMethod;
+import io.netty.handler.codec.http.HttpRequest;
+import io.netty.handler.codec.http.HttpVersion;
+import io.netty.handler.codec.http.LastHttpContent;
+import io.netty.handler.codec.http.QueryStringDecoder;
+import org.elasticsearch.common.Randomness;
+import org.elasticsearch.http.HttpPipelinedRequest;
+import org.elasticsearch.test.ESTestCase;
+import org.junit.After;
+
+import java.nio.channels.ClosedChannelException;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.LinkedTransferQueue;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH;
+import static io.netty.handler.codec.http.HttpResponseStatus.OK;
+import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1;
+import static org.hamcrest.core.Is.is;
+
+public class NioHttpPipeliningHandlerTests extends ESTestCase {
+
+    private final ExecutorService handlerService = Executors.newFixedThreadPool(randomIntBetween(4, 8));
+    private final ExecutorService eventLoopService = Executors.newFixedThreadPool(1);
+    private final Map<String, CountDownLatch> waitingRequests = new ConcurrentHashMap<>();
+    private final Map<String, CountDownLatch> finishingRequests = new ConcurrentHashMap<>();
+
+    @After
+    public void cleanup() throws Exception {
+        waitingRequests.keySet().forEach(this::finishRequest);
+        shutdownExecutorService();
+    }
+
+    private CountDownLatch finishRequest(String url) {
+        waitingRequests.get(url).countDown();
+        return finishingRequests.get(url);
+    }
+
+    private void shutdownExecutorService() throws InterruptedException {
+        if (!handlerService.isShutdown()) {
+            handlerService.shutdown();
+            handlerService.awaitTermination(10, TimeUnit.SECONDS);
+        }
+        if (!eventLoopService.isShutdown()) {
+            eventLoopService.shutdown();
+            eventLoopService.awaitTermination(10, TimeUnit.SECONDS);
+        }
+    }
+
+    public void testThatPipeliningWorksWithFastSerializedRequests() throws InterruptedException {
+        final int numberOfRequests = randomIntBetween(2, 128);
+        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests),
+            new WorkEmulatorHandler());
+
+        for (int i = 0; i < numberOfRequests; i++) {
+            embeddedChannel.writeInbound(createHttpRequest("/" + String.valueOf(i)));
+        }
+
+        final List<CountDownLatch> latches = new ArrayList<>();
+        for (final String url : waitingRequests.keySet()) {
+            latches.add(finishRequest(url));
+        }
+
+        for (final CountDownLatch latch : latches) {
+            latch.await();
+        }
+
+        embeddedChannel.flush();
+
+        for (int i = 0; i < numberOfRequests; i++) {
+            assertReadHttpMessageHasContent(embeddedChannel, String.valueOf(i));
+        }
+
+        assertTrue(embeddedChannel.isOpen());
+    }
+
+    public void testThatPipeliningWorksWhenSlowRequestsInDifferentOrder() throws InterruptedException {
+        final int numberOfRequests = randomIntBetween(2, 128);
+        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests),
+            new WorkEmulatorHandler());
+
+        for (int i = 0; i < numberOfRequests; i++) {
+            embeddedChannel.writeInbound(createHttpRequest("/" + String.valueOf(i)));
+        }
+
+        // random order execution
+        final List<String> urls = new ArrayList<>(waitingRequests.keySet());
+        Randomness.shuffle(urls);
+        final List<CountDownLatch> latches = new ArrayList<>();
+        for (final String url : urls) {
+            latches.add(finishRequest(url));
+        }
+
+        for (final CountDownLatch latch : latches) {
+            latch.await();
+        }
+
+        embeddedChannel.flush();
+
+        for (int i = 0; i < numberOfRequests; i++) {
+            assertReadHttpMessageHasContent(embeddedChannel, String.valueOf(i));
+        }
+
+        assertTrue(embeddedChannel.isOpen());
+    }
+
+    public void testThatPipeliningWorksWithChunkedRequests() throws InterruptedException {
+        final int numberOfRequests = randomIntBetween(2, 128);
+        final EmbeddedChannel embeddedChannel =
+            new EmbeddedChannel(
+                new AggregateUrisAndHeadersHandler(),
+                new NioHttpPipeliningHandler(logger, numberOfRequests),
+                new WorkEmulatorHandler());
+
+        for (int i = 0; i < numberOfRequests; i++) {
+            final DefaultHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/" + i);
+            embeddedChannel.writeInbound(request);
+            embeddedChannel.writeInbound(LastHttpContent.EMPTY_LAST_CONTENT);
+        }
+
+        final List<CountDownLatch> latches = new ArrayList<>();
+        for (int i = numberOfRequests - 1; i >= 0; i--) {
+            latches.add(finishRequest(Integer.toString(i)));
+        }
+
+        for (final CountDownLatch latch : latches) {
+            latch.await();
+        }
+
+        embeddedChannel.flush();
+
+        for (int i = 0; i < numberOfRequests; i++) {
+            assertReadHttpMessageHasContent(embeddedChannel, Integer.toString(i));
+        }
+
+        assertTrue(embeddedChannel.isOpen());
+    }
+
+    public void testThatPipeliningClosesConnectionWithTooManyEvents() throws InterruptedException {
+        final int numberOfRequests = randomIntBetween(2, 128);
+        final EmbeddedChannel embeddedChannel = new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests),
+            new WorkEmulatorHandler());
+
+        for (int i = 0; i < 1 + numberOfRequests + 1; i++) {
+            embeddedChannel.writeInbound(createHttpRequest("/" + Integer.toString(i)));
+        }
+
+        final List<CountDownLatch> latches = new ArrayList<>();
+        final List<Integer> requests = IntStream.range(1, numberOfRequests + 1).boxed().collect(Collectors.toList());
+        Randomness.shuffle(requests);
+
+        for (final Integer request : requests) {
+            latches.add(finishRequest(request.toString()));
+        }
+
+        for (final CountDownLatch latch : latches) {
+            latch.await();
+        }
+
+        finishRequest(Integer.toString(numberOfRequests + 1)).await();
+
+        embeddedChannel.flush();
+
+        assertFalse(embeddedChannel.isOpen());
+    }
+
+    public void testPipeliningRequestsAreReleased() throws InterruptedException {
+        final int numberOfRequests = 10;
+        final EmbeddedChannel embeddedChannel =
+            new EmbeddedChannel(new NioHttpPipeliningHandler(logger, numberOfRequests + 1));
+
+        for (int i = 0; i < numberOfRequests; i++) {
+            embeddedChannel.writeInbound(createHttpRequest("/" + i));
+        }
+
+        HttpPipelinedRequest<FullHttpRequest> inbound;
+        ArrayList<HttpPipelinedRequest<FullHttpRequest>> requests = new ArrayList<>();
+        while ((inbound = embeddedChannel.readInbound()) != null) {
+            requests.add(inbound);
+        }
+
+        ArrayList<ChannelPromise> promises = new ArrayList<>();
+        for (int i = 1; i < requests.size(); ++i) {
+            final FullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK);
+            ChannelPromise promise = embeddedChannel.newPromise();
+            promises.add(promise);
+            int sequence = requests.get(i).getSequence();
+            NioHttpResponse resp = new NioHttpResponse(sequence, httpResponse);
+            embeddedChannel.writeAndFlush(resp, promise);
+        }
+
+        for (ChannelPromise promise : promises) {
+            assertFalse(promise.isDone());
+        }
+        embeddedChannel.close().syncUninterruptibly();
+        for (ChannelPromise promise : promises) {
+            assertTrue(promise.isDone());
+            assertTrue(promise.cause() instanceof ClosedChannelException);
+        }
+    }
+
+    private void assertReadHttpMessageHasContent(EmbeddedChannel embeddedChannel, String expectedContent) {
+        FullHttpResponse response = (FullHttpResponse) embeddedChannel.outboundMessages().poll();
+        assertNotNull("Expected response to exist, maybe you did not wait long enough?", response);
+        assertNotNull("Expected response to have content " + expectedContent, response.content());
+        String data = new String(ByteBufUtil.getBytes(response.content()), StandardCharsets.UTF_8);
+        assertThat(data, is(expectedContent));
+    }
+
+    private FullHttpRequest createHttpRequest(String uri) {
+        return new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, uri);
+    }
+
+    private static class AggregateUrisAndHeadersHandler extends SimpleChannelInboundHandler<HttpRequest> {
+
+        static final Queue<String> QUEUE_URI = new LinkedTransferQueue<>();
+
+        @Override
+        protected void channelRead0(ChannelHandlerContext ctx, HttpRequest request) throws Exception {
+            QUEUE_URI.add(request.uri());
+        }
+
+    }
+
+    private class WorkEmulatorHandler extends SimpleChannelInboundHandler<HttpPipelinedRequest<LastHttpContent>> {
+
+        @Override
+        protected void channelRead0(final ChannelHandlerContext ctx, HttpPipelinedRequest<LastHttpContent> pipelinedRequest) {
+            LastHttpContent request = pipelinedRequest.getRequest();
+            final QueryStringDecoder decoder;
+            if (request instanceof FullHttpRequest) {
+                decoder = new QueryStringDecoder(((FullHttpRequest) request).uri());
+            } else {
+                decoder = new QueryStringDecoder(AggregateUrisAndHeadersHandler.QUEUE_URI.poll());
+            }
+
+            final String uri = decoder.path().replace("/", "");
+            final ByteBuf content = Unpooled.copiedBuffer(uri, StandardCharsets.UTF_8);
+            final DefaultFullHttpResponse httpResponse = new DefaultFullHttpResponse(HTTP_1_1, OK, content);
+            httpResponse.headers().add(CONTENT_LENGTH, content.readableBytes());
+
+            final CountDownLatch waitingLatch = new CountDownLatch(1);
+            waitingRequests.put(uri, waitingLatch);
+            final CountDownLatch finishingLatch = new CountDownLatch(1);
+            finishingRequests.put(uri, finishingLatch);
+
+            handlerService.submit(() -> {
+                try {
+                    waitingLatch.await(1000, TimeUnit.SECONDS);
+                    final ChannelPromise promise = ctx.newPromise();
+                    eventLoopService.submit(() -> {
+                        ctx.write(new NioHttpResponse(pipelinedRequest.getSequence(), httpResponse), promise);
+                        finishingLatch.countDown();
+                    });
+                } catch (InterruptedException e) {
+                    fail(e.toString());
+                }
+            });
+        }
+    }
+}
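Editor's note: all of the tests above run the handler inside Netty's EmbeddedChannel, which executes a pipeline entirely in memory: writeInbound/writeOutbound push messages through the handlers, and readInbound/readOutbound pull out whatever reached the other end. A self-contained demonstration with a trivial handler invented for the example:

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOutboundHandlerAdapter;
import io.netty.channel.ChannelPromise;
import io.netty.channel.embedded.EmbeddedChannel;

// Unit-testing a Netty handler without sockets or event-loop threads.
public class EmbeddedChannelDemo {

    static final class UpperCaseHandler extends ChannelOutboundHandlerAdapter {
        @Override
        public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
            ctx.write(((String) msg).toUpperCase(), promise); // transform outbound messages
        }
    }

    public static void main(String[] args) {
        EmbeddedChannel channel = new EmbeddedChannel(new UpperCaseHandler());
        channel.writeOutbound("hello");              // push a message through the pipeline
        String transformed = channel.readOutbound(); // pull what reached the "wire"
        System.out.println(transformed);             // prints HELLO
        channel.finish();                            // release any buffered resources
    }
}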
@@ -16,65 +16,53 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.http.netty4;
+package org.elasticsearch.http.nio;

 import io.netty.handler.codec.http.FullHttpResponse;
-import org.elasticsearch.ESNetty4IntegTestCase;
-import org.elasticsearch.common.network.NetworkModule;
-import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.NioIntegTestCase;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.http.HttpServerTransport;
 import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
 import org.elasticsearch.test.ESIntegTestCase.Scope;

-import java.util.ArrayList;
 import java.util.Collection;
-import java.util.List;
 import java.util.Locale;

-import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.is;

 @ClusterScope(scope = Scope.TEST, supportsDedicatedMasters = false, numDataNodes = 1)
-public class Netty4PipeliningDisabledIT extends ESNetty4IntegTestCase {
+public class NioPipeliningIT extends NioIntegTestCase {

     @Override
     protected boolean addMockHttpTransport() {
         return false; // enable http
     }

-    @Override
-    protected Settings nodeSettings(int nodeOrdinal) {
-        return Settings.builder()
-            .put(super.nodeSettings(nodeOrdinal))
-            .put("http.pipelining", false)
-            .build();
-    }
-
-    public void testThatNettyHttpServerDoesNotSupportPipelining() throws Exception {
-        ensureGreen();
-        String[] requests = new String[] {"/", "/_nodes/stats", "/", "/_cluster/state", "/", "/_nodes", "/"};
+    public void testThatNioHttpServerSupportsPipelining() throws Exception {
+        String[] requests = new String[]{"/", "/_nodes/stats", "/", "/_cluster/state", "/"};

         HttpServerTransport httpServerTransport = internalCluster().getInstance(HttpServerTransport.class);
         TransportAddress[] boundAddresses = httpServerTransport.boundAddress().boundAddresses();
-        TransportAddress transportAddress = (TransportAddress) randomFrom(boundAddresses);
+        TransportAddress transportAddress = randomFrom(boundAddresses);

         try (Netty4HttpClient nettyHttpClient = new Netty4HttpClient()) {
             Collection<FullHttpResponse> responses = nettyHttpClient.get(transportAddress.address(), requests);
-            assertThat(responses, hasSize(requests.length));
+            assertThat(responses, hasSize(5));

-            List<String> opaqueIds = new ArrayList<>(Netty4HttpClient.returnOpaqueIds(responses));
-
-            assertResponsesOutOfOrder(opaqueIds);
+            Collection<String> opaqueIds = Netty4HttpClient.returnOpaqueIds(responses);
+            assertOpaqueIdsInOrder(opaqueIds);
         }
     }

-    /**
-     * checks if all responses are there, but also tests that they are out of order because pipelining is disabled
-     */
-    private void assertResponsesOutOfOrder(List<String> opaqueIds) {
-        String message = String.format(Locale.ROOT, "Expected returned http message ids to be in any order of: %s", opaqueIds);
-        assertThat(message, opaqueIds, containsInAnyOrder("0", "1", "2", "3", "4", "5", "6"));
+    private void assertOpaqueIdsInOrder(Collection<String> opaqueIds) {
+        // check if opaque ids are monotonically increasing
+        int i = 0;
+        String msg = String.format(Locale.ROOT, "Expected list of opaque ids to be monotonically increasing, got [%s]", opaqueIds);
+        for (String opaqueId : opaqueIds) {
+            assertThat(msg, opaqueId, is(String.valueOf(i++)));
+        }
     }

 }
@@ -18,33 +18,8 @@
  */
 package org.elasticsearch.upgrades;

-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.StringEntity;
-import org.elasticsearch.Version;
-import org.elasticsearch.action.support.PlainActionFuture;
-import org.elasticsearch.client.Response;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.test.rest.ESRestTestCase;
-import org.elasticsearch.test.rest.yaml.ObjectPath;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Future;
-import java.util.function.Predicate;
-
-import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength;
-import static java.util.Collections.emptyMap;
-import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;
-import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE_SETTING;
-import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY;
-import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.hasSize;
-import static org.hamcrest.Matchers.notNullValue;

 public abstract class AbstractRollingTestCase extends ESRestTestCase {
     protected enum ClusterType {