Enable HTTP compression by default with compression level 3
With this commit we compress HTTP responses provided the client supports it, as indicated by the `Accept-Encoding` HTTP header. We also process compressed HTTP requests when needed. The default compression level is lowered from 6 to 3, as benchmarks have indicated that this reduces query latency with only a negligible increase in network traffic.

Closes #7309
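For illustration (not part of the commit): from a client's perspective the negotiation is plain HTTP content encoding. A minimal JDK-only sketch, assuming a node listening on the default HTTP port 9200:

import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPInputStream;

public class GzipNegotiationExample {
    public static void main(String[] args) throws Exception {
        HttpURLConnection conn = (HttpURLConnection) new URL("http://localhost:9200/").openConnection();
        // Advertise gzip support; the server may then compress the response.
        conn.setRequestProperty("Accept-Encoding", "gzip");

        InputStream raw = conn.getInputStream();
        // Unwrap only if the server actually compressed the response.
        InputStream in = "gzip".equals(conn.getContentEncoding()) ? new GZIPInputStream(raw) : raw;
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
            reader.lines().forEach(System.out::println);
        }
    }
}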
Parent: 0eed5cf083
Commit: 0a6f40c7f5
HttpTransportSettings.java
@@ -50,9 +50,11 @@ public final class HttpTransportSettings {
     public static final Setting<Integer> SETTING_PIPELINING_MAX_EVENTS =
         Setting.intSetting("http.pipelining.max_events", 10000, Property.NodeScope);
     public static final Setting<Boolean> SETTING_HTTP_COMPRESSION =
-        Setting.boolSetting("http.compression", false, Property.NodeScope);
+        Setting.boolSetting("http.compression", true, Property.NodeScope);
+    // we intentionally use a different compression level than Netty here as our benchmarks have shown that a compression level of 3 is the
+    // best compromise between reduction in network traffic and added latency. For more details please check #7309.
     public static final Setting<Integer> SETTING_HTTP_COMPRESSION_LEVEL =
-        Setting.intSetting("http.compression_level", 6, Property.NodeScope);
+        Setting.intSetting("http.compression_level", 3, Property.NodeScope);
     public static final Setting<List<String>> SETTING_HTTP_HOST =
         listSetting("http.host", emptyList(), Function.identity(), Property.NodeScope);
     public static final Setting<List<String>> SETTING_HTTP_PUBLISH_HOST =
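Not part of the commit, but to make the new defaults concrete: a sketch of how the settings above resolve when nothing is configured explicitly (Settings.EMPTY is Elasticsearch's empty settings instance):

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.http.HttpTransportSettings;

public class DefaultsSketch {
    public static void main(String[] args) {
        // With no explicit configuration, the new defaults from the hunk above apply.
        Settings settings = Settings.EMPTY;
        boolean compressionEnabled = HttpTransportSettings.SETTING_HTTP_COMPRESSION.get(settings); // true
        int compressionLevel = HttpTransportSettings.SETTING_HTTP_COMPRESSION_LEVEL.get(settings);  // 3
        System.out.println("http.compression=" + compressionEnabled + ", level=" + compressionLevel);
    }
}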
ESHttpContentDecompressor.java (deleted)
@@ -1,51 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.http.netty;
-
-import org.elasticsearch.transport.TransportException;
-import org.jboss.netty.buffer.ChannelBuffer;
-import org.jboss.netty.handler.codec.embedder.DecoderEmbedder;
-import org.jboss.netty.handler.codec.http.HttpContentDecompressor;
-import org.jboss.netty.handler.codec.http.HttpHeaders;
-
-public class ESHttpContentDecompressor extends HttpContentDecompressor {
-    private final boolean compression;
-
-    public ESHttpContentDecompressor(boolean compression) {
-        super();
-        this.compression = compression;
-    }
-
-    @Override
-    protected DecoderEmbedder<ChannelBuffer> newContentDecoder(String contentEncoding) throws Exception {
-        if (compression) {
-            // compression is enabled so handle the request according to the headers (compressed and uncompressed)
-            return super.newContentDecoder(contentEncoding);
-        } else {
-            // if compression is disabled only allow "identity" (uncompressed) requests
-            if (HttpHeaders.Values.IDENTITY.equals(contentEncoding)) {
-                // nothing to handle here
-                return null;
-            } else {
-                throw new TransportException("Support for compressed content is disabled. You can enable it with http.compression=true");
-            }
-        }
-    }
-}
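The deleted subclass existed only to reject compressed request bodies while `http.compression` was disabled; since compressed requests are now always accepted, Netty's stock HttpContentDecompressor suffices. For intuition, a self-contained sketch (JDK only, not part of the commit) of the gzip round-trip that a client and such a decompressor perform on a request body:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

public class GzipRoundTrip {
    public static void main(String[] args) throws Exception {
        byte[] original = "{\"query\":{\"match_all\":{}}}".getBytes(StandardCharsets.UTF_8);

        // What a client does before sending a body with Content-Encoding: gzip.
        ByteArrayOutputStream compressed = new ByteArrayOutputStream();
        try (GZIPOutputStream gzip = new GZIPOutputStream(compressed)) {
            gzip.write(original);
        }

        // What the server-side decompressor undoes before request parsing continues.
        ByteArrayOutputStream decompressed = new ByteArrayOutputStream();
        try (GZIPInputStream in = new GZIPInputStream(new ByteArrayInputStream(compressed.toByteArray()))) {
            byte[] buffer = new byte[4096];
            for (int n = in.read(buffer); n != -1; n = in.read(buffer)) {
                decompressed.write(buffer, 0, n);
            }
        }
        System.out.println(decompressed.toString(StandardCharsets.UTF_8.name()));
    }
}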
NettyHttpServerTransport.java
@@ -70,6 +70,7 @@ import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
 import org.jboss.netty.channel.socket.oio.OioServerSocketChannelFactory;
 import org.jboss.netty.handler.codec.http.HttpChunkAggregator;
 import org.jboss.netty.handler.codec.http.HttpContentCompressor;
+import org.jboss.netty.handler.codec.http.HttpContentDecompressor;
 import org.jboss.netty.handler.codec.http.HttpMethod;
 import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
 import org.jboss.netty.handler.timeout.ReadTimeoutException;
@@ -544,19 +545,19 @@ public class NettyHttpServerTransport extends AbstractLifecycleComponent<HttpServerTransport>
                 requestDecoder.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents);
             }
             pipeline.addLast("decoder", requestDecoder);
-            pipeline.addLast("decoder_compress", new ESHttpContentDecompressor(transport.compression));
+            pipeline.addLast("decoder_compress", new HttpContentDecompressor());
             HttpChunkAggregator httpChunkAggregator = new HttpChunkAggregator((int) transport.maxContentLength.bytes());
             if (transport.maxCompositeBufferComponents != -1) {
                 httpChunkAggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents);
             }
             pipeline.addLast("aggregator", httpChunkAggregator);
-            if (SETTING_CORS_ENABLED.get(transport.settings())) {
-                pipeline.addLast("cors", new CorsHandler(transport.getCorsConfig()));
-            }
             pipeline.addLast("encoder", new ESHttpResponseEncoder());
             if (transport.compression) {
                 pipeline.addLast("encoder_compress", new HttpContentCompressor(transport.compressionLevel));
             }
+            if (SETTING_CORS_ENABLED.get(transport.settings())) {
+                pipeline.addLast("cors", new CorsHandler(transport.getCorsConfig()));
+            }
             if (transport.pipelining) {
                 pipeline.addLast("pipelining", new HttpPipeliningHandler(transport.pipeliningMaxEvents));
             }
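Not part of the commit: a condensed sketch of the handler order the hunk above establishes, using only Netty 3's bundled HTTP codecs (the ES-specific response encoder plus the CORS and pipelining handlers are omitted):

import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.handler.codec.http.HttpContentCompressor;
import org.jboss.netty.handler.codec.http.HttpContentDecompressor;
import org.jboss.netty.handler.codec.http.HttpRequestDecoder;
import org.jboss.netty.handler.codec.http.HttpResponseEncoder;

public class PipelineSketch {
    static ChannelPipeline httpPipeline(boolean compression, int compressionLevel) {
        ChannelPipeline pipeline = Channels.pipeline();
        pipeline.addLast("decoder", new HttpRequestDecoder());                // parse inbound HTTP
        pipeline.addLast("decoder_compress", new HttpContentDecompressor()); // inflate gzip/deflate request bodies unconditionally
        pipeline.addLast("encoder", new HttpResponseEncoder());              // serialize outbound HTTP
        if (compression) {
            // compresses responses only when the request advertised a matching Accept-Encoding
            pipeline.addLast("encoder_compress", new HttpContentCompressor(compressionLevel));
        }
        return pipeline;
    }
}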
NettyHttpCompressionIT.java (new file)
@@ -0,0 +1,146 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.http.netty;
+
+import org.apache.http.Header;
+import org.apache.http.HttpException;
+import org.apache.http.HttpHeaders;
+import org.apache.http.HttpResponseInterceptor;
+import org.apache.http.impl.client.CloseableHttpClient;
+import org.apache.http.impl.client.HttpClients;
+import org.apache.http.protocol.HttpContext;
+import org.elasticsearch.common.network.NetworkModule;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.http.HttpTransportSettings;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.rest.client.http.HttpResponse;
+
+import java.io.IOException;
+
+@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 1, numClientNodes = 1)
+public class NettyHttpCompressionIT extends ESIntegTestCase {
+    private static final String GZIP_ENCODING = "gzip";
+
+    private static final String SAMPLE_DOCUMENT = "{\n" +
+        "   \"name\": {\n" +
+        "      \"first name\": \"Steve\",\n" +
+        "      \"last name\": \"Jobs\"\n" +
+        "   }\n" +
+        "}";
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        return Settings.builder()
+            .put(super.nodeSettings(nodeOrdinal))
+            .put(NetworkModule.HTTP_ENABLED.getKey(), true)
+            .put(HttpTransportSettings.SETTING_HTTP_COMPRESSION.getKey(), true)
+            .build();
+    }
+
+    public void testCompressesResponseIfRequested() throws Exception {
+        ensureGreen();
+
+        // we need to intercept early, otherwise internal logic in HttpClient will just remove the header and we cannot verify it
+        ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor();
+        CloseableHttpClient internalClient = HttpClients.custom().addInterceptorFirst(headerExtractor).build();
+
+        HttpResponse response = httpClient(internalClient).path("/").addHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING).execute();
+        assertEquals(200, response.getStatusCode());
+        assertTrue(headerExtractor.hasContentEncodingHeader());
+        assertEquals(GZIP_ENCODING, headerExtractor.getContentEncodingHeader().getValue());
+    }
+
+    public void testUncompressedResponseByDefault() throws Exception {
+        ensureGreen();
+
+        ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor();
+        CloseableHttpClient internalClient = HttpClients
+            .custom()
+            .disableContentCompression()
+            .addInterceptorFirst(headerExtractor)
+            .build();
+
+        HttpResponse response = httpClient(internalClient).path("/").execute();
+        assertEquals(200, response.getStatusCode());
+        assertFalse(headerExtractor.hasContentEncodingHeader());
+    }
+
+    public void testCanInterpretUncompressedRequest() throws Exception {
+        ensureGreen();
+
+        ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor();
+        CloseableHttpClient internalClient = HttpClients
+            .custom()
+            // this disables content compression in both directions (request and response)
+            .disableContentCompression()
+            .addInterceptorFirst(headerExtractor)
+            .build();
+
+        HttpResponse response = httpClient(internalClient)
+            .path("/company/employees/1")
+            .method("POST")
+            .body(SAMPLE_DOCUMENT)
+            .execute();
+
+        assertEquals(201, response.getStatusCode());
+        assertFalse(headerExtractor.hasContentEncodingHeader());
+    }
+
+    public void testCanInterpretCompressedRequest() throws Exception {
+        ensureGreen();
+
+        ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor();
+        // we don't call #disableContentCompression() hence the client will send the content compressed
+        CloseableHttpClient internalClient = HttpClients.custom().addInterceptorFirst(headerExtractor).build();
+
+        HttpResponse response = httpClient(internalClient)
+            .path("/company/employees/2")
+            .method("POST")
+            .body(SAMPLE_DOCUMENT)
+            .execute();
+
+        assertEquals(201, response.getStatusCode());
+        assertTrue(headerExtractor.hasContentEncodingHeader());
+        assertEquals(GZIP_ENCODING, headerExtractor.getContentEncodingHeader().getValue());
+    }
+
+    private static class ContentEncodingHeaderExtractor implements HttpResponseInterceptor {
+        private Header contentEncodingHeader;
+
+        @Override
+        public void process(org.apache.http.HttpResponse response, HttpContext context) throws HttpException, IOException {
+            final Header[] headers = response.getHeaders(HttpHeaders.CONTENT_ENCODING);
+            if (headers.length == 1) {
+                this.contentEncodingHeader = headers[0];
+            } else if (headers.length > 1) {
+                throw new AssertionError("Expected none or one content encoding header but got " + headers.length + " headers.");
+            }
+        }
+
+        public boolean hasContentEncodingHeader() {
+            return contentEncodingHeader != null;
+        }
+
+        public Header getContentEncodingHeader() {
+            return contentEncodingHeader;
+        }
+    }
+}
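The interceptor trick matters because Apache HttpClient transparently decompresses responses and strips the Content-Encoding header before test code can see it. A standalone sketch of the same probe outside the ES test infrastructure (not part of the commit; the URL assumes a local node):

import org.apache.http.HttpResponseInterceptor;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;

public class ContentEncodingProbe {
    public static void main(String[] args) throws Exception {
        final String[] encoding = new String[1];
        // Runs before HttpClient's transparent gzip handling removes the header.
        HttpResponseInterceptor extractor = (response, context) -> {
            if (response.containsHeader("Content-Encoding")) {
                encoding[0] = response.getFirstHeader("Content-Encoding").getValue();
            }
        };
        try (CloseableHttpClient client = HttpClients.custom().addInterceptorFirst(extractor).build()) {
            client.execute(new HttpGet("http://localhost:9200/")).close();
            System.out.println("Content-Encoding: " + encoding[0]); // expected: gzip
        }
    }
}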
migrate_5_0.asciidoc
@@ -36,6 +36,7 @@ way to do this is to upgrade to Elasticsearch 2.3 or later and to use the
 * <<breaking_50_index_apis>>
 * <<breaking_50_settings_changes>>
 * <<breaking_50_allocation>>
+* <<breaking_50_http_changes>>
 * <<breaking_50_rest_api_changes>>
 * <<breaking_50_cat_api>>
 * <<breaking_50_java_api_changes>>
@@ -60,6 +61,8 @@ include::migrate_5_0/settings.asciidoc[]

 include::migrate_5_0/allocation.asciidoc[]

+include::migrate_5_0/http.asciidoc[]
+
 include::migrate_5_0/rest.asciidoc[]

 include::migrate_5_0/cat.asciidoc[]
migrate_5_0/http.asciidoc (new file)
@@ -0,0 +1,9 @@
+[[breaking_50_http_changes]]
+=== HTTP changes
+
+==== Compressed HTTP requests are always accepted
+
+Before 5.0, Elasticsearch accepted compressed HTTP requests only if the setting
+`http.compression` was set to `true`. Elasticsearch now always accepts compressed
+requests, but sends compressed responses only if `http.compression` is set to
+`true` (the default as of 5.0).
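For illustration (not part of the commit; index and document id are made up): sending a gzip-compressed request body with the JDK alone, which a 5.0 node accepts regardless of the `http.compression` setting:

import java.io.OutputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPOutputStream;

public class CompressedRequestExample {
    public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:9200/company/employees/1");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestMethod("POST");
        conn.setDoOutput(true);
        // Tell the server the body is gzip-compressed.
        conn.setRequestProperty("Content-Encoding", "gzip");
        conn.setRequestProperty("Content-Type", "application/json");
        try (OutputStream out = new GZIPOutputStream(conn.getOutputStream())) {
            out.write("{\"name\":\"Steve\"}".getBytes(StandardCharsets.UTF_8));
        }
        System.out.println("HTTP " + conn.getResponseCode()); // expected: 201 for a newly indexed document
    }
}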
modules/http.asciidoc
@@ -48,10 +48,10 @@ to `4kb`


 |`http.compression` |Support for compression when possible (with
-Accept-Encoding). Defaults to `false`.
+Accept-Encoding). Defaults to `true`.

-|`http.compression_level` |Defines the compression level to use.
-Defaults to `6`.
+|`http.compression_level` |Defines the compression level to use for HTTP responses. Valid values are in the range of 1 (minimum compression)
+and 9 (maximum compression). Defaults to `3`.

 |`http.cors.enabled` |Enable or disable cross-origin resource sharing,
 i.e. whether a browser on another origin can do requests to
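Not part of the commit: the level range maps to the standard deflate levels, so the size/speed trade-off behind the new default can be eyeballed with java.util.zip.Deflater. A sketch that compresses the same repetitive JSON-like buffer at each level:

import java.util.zip.Deflater;

public class CompressionLevelDemo {
    public static void main(String[] args) {
        // Build a repetitive 100 KB input, loosely resembling JSON payloads.
        byte[] input = new byte[100_000];
        for (int i = 0; i < input.length; i++) {
            input[i] = (byte) "{\"field\":\"value\"}".charAt(i % 17);
        }
        byte[] output = new byte[input.length];
        for (int level = 1; level <= 9; level++) {
            Deflater deflater = new Deflater(level);
            deflater.setInput(input);
            deflater.finish();
            int size = deflater.deflate(output); // compressed size at this level
            deflater.end();
            System.out.println("level " + level + ": " + size + " bytes");
        }
    }
}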
ESIntegTestCase.java
@@ -23,6 +23,7 @@ import com.carrotsearch.randomizedtesting.RandomizedTest;
 import com.carrotsearch.randomizedtesting.annotations.TestGroup;
 import com.carrotsearch.randomizedtesting.generators.RandomInts;
 import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.http.impl.client.HttpClients;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.LuceneTestCase;
@@ -2040,15 +2041,20 @@ public abstract class ESIntegTestCase extends ESTestCase {
     }

     protected HttpRequestBuilder httpClient() {
+        return httpClient(HttpClients.createDefault());
+    }
+
+    protected HttpRequestBuilder httpClient(CloseableHttpClient httpClient) {
         final NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get();
         final NodeInfo[] nodes = nodeInfos.getNodes();
         assertTrue(nodes.length > 0);
         TransportAddress publishAddress = randomFrom(nodes).getHttp().address().publishAddress();
         assertEquals(1, publishAddress.uniqueAddressTypeId());
         InetSocketAddress address = ((InetSocketTransportAddress) publishAddress).address();
-        return new HttpRequestBuilder(HttpClients.createDefault()).host(NetworkAddress.format(address.getAddress())).port(address.getPort());
+        return new HttpRequestBuilder(httpClient).host(NetworkAddress.format(address.getAddress())).port(address.getPort());
     }

+
     /**
      * This method is executed iff the test is annotated with {@link SuiteScopeTestCase}
      * before the first test of this class is executed.
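A hypothetical usage sketch (assuming it runs inside an ESIntegTestCase subclass such as the new IT above): the overload lets a test supply a customized Apache HttpClient, for example with transparent gzip handling turned off:

public void testPlainResponse() throws Exception {
    // disableContentCompression() stops HttpClient from silently negotiating and unwrapping gzip
    CloseableHttpClient client = HttpClients.custom().disableContentCompression().build();
    HttpResponse response = httpClient(client).path("/").execute();
    assertEquals(200, response.getStatusCode());
}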