Merge branch 'master' into rankeval
commit 97b25f3b0c

@@ -119,11 +119,13 @@ Alternatively, `idea.no.launcher=true` can be set in the
[`idea.properties`](https://www.jetbrains.com/help/idea/file-idea-properties.html)
file which can be accessed under Help > Edit Custom Properties (this will require a
restart of IDEA). For IDEA 2017.3 and above, in addition to the JVM option, you will need to go to
`Run->Edit Configurations...` and change the value for the `Shorten command line` setting from
`Run->Edit Configurations->...->Defaults->JUnit` and change the value for the `Shorten command line` setting from
`user-local default: none` to `classpath file`. You may also need to [remove `ant-javafx.jar` from your
classpath](https://github.com/elastic/elasticsearch/issues/14348) if that is
reported as a source of jar hell.

To run an instance of elasticsearch from the source code run `gradle run`

The Elasticsearch codebase makes heavy use of Java `assert`s and the
test runner requires that assertions be enabled within the JVM. This
can be accomplished by passing the flag `-ea` to the JVM on startup.

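A minimal sketch (not from this commit) of the JVM assertion behaviour the paragraph above relies on: the `assert` statement only fires when the JVM is started with `-ea`.

```java
public class AssertDemo {
    public static void main(String[] args) {
        // `java AssertDemo` prints "assertions disabled" because assertions are off by default;
        // `java -ea AssertDemo` throws an AssertionError instead, which is what the test runner expects.
        assert false : "assertions are enabled";
        System.out.println("assertions disabled");
    }
}
```
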
@@ -307,4 +307,9 @@ Defaults env_keep += "BATS_ARCHIVES"
SUDOERS_VARS
chmod 0440 /etc/sudoers.d/elasticsearch_vars
SHELL
# This prevents leftovers from previous tests using the
# same VM from messing up the current test
config.vm.provision "clean_tmp", run: "always", type: "shell", inline: <<-SHELL
rm -rf /tmp/elasticsearch*
SHELL
end

@@ -658,7 +658,6 @@
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]recovery[/\\]RelocationIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]recovery[/\\]TruncatedRecoveryIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]BytesRestResponseTests.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]routing[/\\]AliasResolveRoutingIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]routing[/\\]AliasRoutingIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]routing[/\\]SimpleRoutingIT.java" checks="LineLength" />
<suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]FileScriptTests.java" checks="LineLength" />

@@ -25,6 +25,8 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;

import java.io.IOException;
import java.util.Collections;

@@ -72,7 +74,7 @@ public final class IndicesClient {
*/
public CreateIndexResponse createIndex(CreateIndexRequest createIndexRequest, Header... headers) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent,
Collections.emptySet(), headers);
Collections.emptySet(), headers);
}

/**

@@ -86,4 +88,27 @@ public final class IndicesClient {
restHighLevelClient.performRequestAsyncAndParseEntity(createIndexRequest, Request::createIndex, CreateIndexResponse::fromXContent,
listener, Collections.emptySet(), headers);
}

/**
* Opens an index using the Open Index API
* <p>
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html">
* Open Index API on elastic.co</a>
*/
public OpenIndexResponse openIndex(OpenIndexRequest openIndexRequest, Header... headers) throws IOException {
return restHighLevelClient.performRequestAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent,
Collections.emptySet(), headers);
}

/**
* Asynchronously opens an index using the Open Index API
* <p>
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html">
* Open Index API on elastic.co</a>
*/
public void openIndexAsync(OpenIndexRequest openIndexRequest, ActionListener<OpenIndexResponse> listener, Header... headers) {
restHighLevelClient.performRequestAsyncAndParseEntity(openIndexRequest, Request::openIndex, OpenIndexResponse::fromXContent,
listener, Collections.emptySet(), headers);
}

}

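A hedged sketch of how a caller might use the new `openIndex` method added above; the client setup and index name are assumptions, not part of the diff.

```java
// Assumes `client` is an already-initialised RestHighLevelClient.
OpenIndexRequest request = new OpenIndexRequest("my-index");
OpenIndexResponse response = client.indices().openIndex(request);
// isAcknowledged() comes from AcknowledgedResponse, which OpenIndexResponse extends.
boolean acknowledged = response.isAcknowledged();
```
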
@@ -31,6 +31,7 @@ import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.get.GetRequest;

@@ -138,6 +139,19 @@ public final class Request {
return new Request(HttpDelete.METHOD_NAME, endpoint, parameters.getParams(), null);
}

static Request openIndex(OpenIndexRequest openIndexRequest) {
String endpoint = endpoint(openIndexRequest.indices(), Strings.EMPTY_ARRAY, "_open");

Params parameters = Params.builder();

parameters.withTimeout(openIndexRequest.timeout());
parameters.withMasterTimeout(openIndexRequest.masterNodeTimeout());
parameters.withWaitForActiveShards(openIndexRequest.waitForActiveShards());
parameters.withIndicesOptions(openIndexRequest.indicesOptions());

return new Request(HttpPost.METHOD_NAME, endpoint, parameters.getParams(), null);
}

static Request createIndex(CreateIndexRequest createIndexRequest) throws IOException {
String endpoint = endpoint(createIndexRequest.indices(), Strings.EMPTY_ARRAY, "");

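For reference, the `_open` endpoint built by `Request.openIndex` above can be reproduced with the same `StringJoiner` pattern the tests later in this commit use; the index names here are made up.

```java
String endpoint = new StringJoiner("/", "/", "")
        .add(String.join(",", "logs-1", "logs-2"))
        .add("_open")
        .toString();
// endpoint == "/logs-1,logs-2/_open"; the request uses HTTP POST and carries no body.
```
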
@@ -26,6 +26,8 @@ import org.elasticsearch.ElasticsearchStatusException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.delete.DeleteRequest;

@@ -283,7 +285,7 @@ public class RestHighLevelClient implements Closeable {
*
* See <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html">Get API on elastic.co</a>
*/
public void getAsync(GetRequest getRequest, ActionListener<GetResponse> listener, Header... headers) {
public final void getAsync(GetRequest getRequest, ActionListener<GetResponse> listener, Header... headers) {
performRequestAsyncAndParseEntity(getRequest, Request::get, GetResponse::fromXContent, listener, singleton(404), headers);
}

@@ -25,14 +25,21 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexResponse;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.util.Locale;

import static org.hamcrest.Matchers.equalTo;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.rest.RestStatus;

import java.io.IOException;
import java.util.Map;

import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;

@@ -128,16 +135,73 @@ public class IndicesClientIT extends ESRestHighLevelClientTestCase {
}
}

public void testOpenExistingIndex() throws IOException {
String[] indices = randomIndices(1, 5);
for (String index : indices) {
createIndex(index);
closeIndex(index);
ResponseException exception = expectThrows(ResponseException.class, () -> client().performRequest("GET", index + "/_search"));
assertThat(exception.getResponse().getStatusLine().getStatusCode(), equalTo(RestStatus.BAD_REQUEST.getStatus()));
assertThat(exception.getMessage().contains(index), equalTo(true));
}

OpenIndexRequest openIndexRequest = new OpenIndexRequest(indices);
OpenIndexResponse openIndexResponse = execute(openIndexRequest, highLevelClient().indices()::openIndex,
highLevelClient().indices()::openIndexAsync);
assertTrue(openIndexResponse.isAcknowledged());

for (String index : indices) {
Response response = client().performRequest("GET", index + "/_search");
assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus()));
}
}

public void testOpenNonExistentIndex() throws IOException {
String[] nonExistentIndices = randomIndices(1, 5);
for (String nonExistentIndex : nonExistentIndices) {
assertFalse(indexExists(nonExistentIndex));
}

OpenIndexRequest openIndexRequest = new OpenIndexRequest(nonExistentIndices);
ElasticsearchException exception = expectThrows(ElasticsearchException.class,
() -> execute(openIndexRequest, highLevelClient().indices()::openIndex, highLevelClient().indices()::openIndexAsync));
assertEquals(RestStatus.NOT_FOUND, exception.status());

OpenIndexRequest lenientOpenIndexRequest = new OpenIndexRequest(nonExistentIndices);
lenientOpenIndexRequest.indicesOptions(IndicesOptions.lenientExpandOpen());
OpenIndexResponse lenientOpenIndexResponse = execute(lenientOpenIndexRequest, highLevelClient().indices()::openIndex,
highLevelClient().indices()::openIndexAsync);
assertThat(lenientOpenIndexResponse.isAcknowledged(), equalTo(true));

OpenIndexRequest strictOpenIndexRequest = new OpenIndexRequest(nonExistentIndices);
strictOpenIndexRequest.indicesOptions(IndicesOptions.strictExpandOpen());
ElasticsearchException strictException = expectThrows(ElasticsearchException.class,
() -> execute(openIndexRequest, highLevelClient().indices()::openIndex, highLevelClient().indices()::openIndexAsync));
assertEquals(RestStatus.NOT_FOUND, strictException.status());
}

private static String[] randomIndices(int minIndicesNum, int maxIndicesNum) {
int numIndices = randomIntBetween(minIndicesNum, maxIndicesNum);
String[] indices = new String[numIndices];
for (int i = 0; i < numIndices; i++) {
indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5).toLowerCase(Locale.ROOT);
}
return indices;
}

private static void createIndex(String index) throws IOException {
Response response = client().performRequest("PUT", index);

assertEquals(200, response.getStatusLine().getStatusCode());
assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus()));
}

private static boolean indexExists(String index) throws IOException {
Response response = client().performRequest("HEAD", index);
return RestStatus.OK.getStatus() == response.getStatusLine().getStatusCode();
}

return response.getStatusLine().getStatusCode() == 200;
private static void closeIndex(String index) throws IOException {
Response response = client().performRequest("POST", index + "/_close");
assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.OK.getStatus()));
}

@SuppressWarnings("unchecked")

@@ -27,6 +27,7 @@ import org.apache.http.util.EntityUtils;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest;
import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkShardRequest;
import org.elasticsearch.action.delete.DeleteRequest;

@@ -42,7 +43,6 @@ import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.WriteRequest;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.action.support.replication.ReplicatedWriteRequest;
import org.elasticsearch.action.support.replication.ReplicationRequest;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.common.CheckedBiConsumer;

@@ -83,7 +83,6 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.StringJoiner;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.function.Supplier;
@@ -93,10 +92,12 @@ import static org.elasticsearch.client.Request.REQUEST_BODY_CONTENT_TYPE;
import static org.elasticsearch.client.Request.enforceSameContentType;
import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.Matchers.nullValue;

public class RequestTests extends ESTestCase {

public void testConstructor() throws Exception {
public void testConstructor() {
final String method = randomFrom("GET", "PUT", "POST", "HEAD", "DELETE");
final String endpoint = randomAlphaOfLengthBetween(1, 10);
final Map<String, String> parameters = singletonMap(randomAlphaOfLength(5), randomAlphaOfLength(5));

@@ -122,7 +123,7 @@ public class RequestTests extends ESTestCase {
assertTrue("Request constructor is not public", Modifier.isPublic(constructors[0].getModifiers()));
}

public void testClassVisibility() throws Exception {
public void testClassVisibility() {
assertTrue("Request class is not public", Modifier.isPublic(Request.class.getModifiers()));
}

@@ -146,7 +147,7 @@ public class RequestTests extends ESTestCase {
getAndExistsTest(Request::get, "GET");
}

public void testDelete() throws IOException {
public void testDelete() {
String index = randomAlphaOfLengthBetween(3, 10);
String type = randomAlphaOfLengthBetween(3, 10);
String id = randomAlphaOfLengthBetween(3, 10);

@@ -155,9 +156,9 @@ public class RequestTests extends ESTestCase {
Map<String, String> expectedParams = new HashMap<>();

setRandomTimeout(deleteRequest::timeout, ReplicationRequest.DEFAULT_TIMEOUT, expectedParams);
setRandomRefreshPolicy(deleteRequest, expectedParams);
setRandomRefreshPolicy(deleteRequest::setRefreshPolicy, expectedParams);
setRandomVersion(deleteRequest, expectedParams);
setRandomVersionType(deleteRequest, expectedParams);
setRandomVersionType(deleteRequest::versionType, expectedParams);

if (frequently()) {
if (randomBoolean()) {
@@ -222,27 +223,13 @@ public class RequestTests extends ESTestCase {
expectedParams.put("version", Long.toString(version));
}
}
if (randomBoolean()) {
VersionType versionType = randomFrom(VersionType.values());
getRequest.versionType(versionType);
if (versionType != VersionType.INTERNAL) {
expectedParams.put("version_type", versionType.name().toLowerCase(Locale.ROOT));
}
}
setRandomVersionType(getRequest::versionType, expectedParams);
if (randomBoolean()) {
int numStoredFields = randomIntBetween(1, 10);
String[] storedFields = new String[numStoredFields];
StringBuilder storedFieldsParam = new StringBuilder();
for (int i = 0; i < numStoredFields; i++) {
String storedField = randomAlphaOfLengthBetween(3, 10);
storedFields[i] = storedField;
storedFieldsParam.append(storedField);
if (i < numStoredFields - 1) {
storedFieldsParam.append(",");
}
}
String storedFieldsParam = randomFields(storedFields);
getRequest.storedFields(storedFields);
expectedParams.put("stored_fields", storedFieldsParam.toString());
expectedParams.put("stored_fields", storedFieldsParam);
}
if (randomBoolean()) {
randomizeFetchSourceContextParams(getRequest::fetchSourceContext, expectedParams);
@@ -283,7 +270,7 @@ public class RequestTests extends ESTestCase {
assertToXContentBody(createIndexRequest, request.getEntity());
}

public void testDeleteIndex() throws IOException {
public void testDeleteIndex() {
DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest();

int numIndices = randomIntBetween(0, 5);

@@ -307,6 +294,29 @@ public class RequestTests extends ESTestCase {
assertNull(request.getEntity());
}

public void testOpenIndex() {
OpenIndexRequest openIndexRequest = new OpenIndexRequest();
int numIndices = randomIntBetween(1, 5);
String[] indices = new String[numIndices];
for (int i = 0; i < numIndices; i++) {
indices[i] = "index-" + randomAlphaOfLengthBetween(2, 5);
}
openIndexRequest.indices(indices);

Map<String, String> expectedParams = new HashMap<>();
setRandomTimeout(openIndexRequest::timeout, AcknowledgedRequest.DEFAULT_ACK_TIMEOUT, expectedParams);
setRandomMasterTimeout(openIndexRequest, expectedParams);
setRandomIndicesOptions(openIndexRequest::indicesOptions, openIndexRequest::indicesOptions, expectedParams);
setRandomWaitForActiveShards(openIndexRequest::waitForActiveShards, expectedParams);

Request request = Request.openIndex(openIndexRequest);
StringJoiner endpoint = new StringJoiner("/", "/", "").add(String.join(",", indices)).add("_open");
assertThat(endpoint.toString(), equalTo(request.getEndpoint()));
assertThat(expectedParams, equalTo(request.getParameters()));
assertThat(request.getMethod(), equalTo("POST"));
assertThat(request.getEntity(), nullValue());
}

public void testIndex() throws IOException {
String index = randomAlphaOfLengthBetween(3, 10);
String type = randomAlphaOfLengthBetween(3, 10);
@@ -326,7 +336,7 @@ public class RequestTests extends ESTestCase {
}

setRandomTimeout(indexRequest::timeout, ReplicationRequest.DEFAULT_TIMEOUT, expectedParams);
setRandomRefreshPolicy(indexRequest, expectedParams);
setRandomRefreshPolicy(indexRequest::setRefreshPolicy, expectedParams);

// There is some logic around _create endpoint and version/version type
if (indexRequest.opType() == DocWriteRequest.OpType.CREATE) {

@@ -334,7 +344,7 @@ public class RequestTests extends ESTestCase {
expectedParams.put("version", Long.toString(Versions.MATCH_DELETED));
} else {
setRandomVersion(indexRequest, expectedParams);
setRandomVersionType(indexRequest, expectedParams);
setRandomVersionType(indexRequest::versionType, expectedParams);
}

if (frequently()) {
@@ -438,20 +448,8 @@ public class RequestTests extends ESTestCase {
}
}
setRandomWaitForActiveShards(updateRequest::waitForActiveShards, expectedParams);
if (randomBoolean()) {
long version = randomLong();
updateRequest.version(version);
if (version != Versions.MATCH_ANY) {
expectedParams.put("version", Long.toString(version));
}
}
if (randomBoolean()) {
VersionType versionType = randomFrom(VersionType.values());
updateRequest.versionType(versionType);
if (versionType != VersionType.INTERNAL) {
expectedParams.put("version_type", versionType.name().toLowerCase(Locale.ROOT));
}
}
setRandomVersion(updateRequest, expectedParams);
setRandomVersionType(updateRequest::versionType, expectedParams);
if (randomBoolean()) {
int retryOnConflict = randomIntBetween(0, 5);
updateRequest.retryOnConflict(retryOnConflict);

@@ -495,7 +493,7 @@ public class RequestTests extends ESTestCase {
}
}

public void testUpdateWithDifferentContentTypes() throws IOException {
public void testUpdateWithDifferentContentTypes() {
IllegalStateException exception = expectThrows(IllegalStateException.class, () -> {
UpdateRequest updateRequest = new UpdateRequest();
updateRequest.doc(new IndexRequest().source(singletonMap("field", "doc"), XContentType.JSON));
@@ -518,13 +516,7 @@ public class RequestTests extends ESTestCase {
expectedParams.put("timeout", BulkShardRequest.DEFAULT_TIMEOUT.getStringRep());
}

if (randomBoolean()) {
WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values());
bulkRequest.setRefreshPolicy(refreshPolicy);
if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) {
expectedParams.put("refresh", refreshPolicy.getValue());
}
}
setRandomRefreshPolicy(bulkRequest::setRefreshPolicy, expectedParams);

XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE);

@@ -537,7 +529,7 @@ public class RequestTests extends ESTestCase {
BytesReference source = RandomObjects.randomSource(random(), xContentType);
DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values());

DocWriteRequest<?> docWriteRequest = null;
DocWriteRequest<?> docWriteRequest;
if (opType == DocWriteRequest.OpType.INDEX) {
IndexRequest indexRequest = new IndexRequest(index, type, id).source(source, xContentType);
docWriteRequest = indexRequest;

@@ -567,6 +559,8 @@ public class RequestTests extends ESTestCase {
}
} else if (opType == DocWriteRequest.OpType.DELETE) {
docWriteRequest = new DeleteRequest(index, type, id);
} else {
throw new UnsupportedOperationException("optype [" + opType + "] not supported");
}

if (randomBoolean()) {
@@ -971,31 +965,15 @@ public class RequestTests extends ESTestCase {
} else {
int numIncludes = randomIntBetween(0, 5);
String[] includes = new String[numIncludes];
StringBuilder includesParam = new StringBuilder();
for (int i = 0; i < numIncludes; i++) {
String include = randomAlphaOfLengthBetween(3, 10);
includes[i] = include;
includesParam.append(include);
if (i < numIncludes - 1) {
includesParam.append(",");
}
}
String includesParam = randomFields(includes);
if (numIncludes > 0) {
expectedParams.put("_source_include", includesParam.toString());
expectedParams.put("_source_include", includesParam);
}
int numExcludes = randomIntBetween(0, 5);
String[] excludes = new String[numExcludes];
StringBuilder excludesParam = new StringBuilder();
for (int i = 0; i < numExcludes; i++) {
String exclude = randomAlphaOfLengthBetween(3, 10);
excludes[i] = exclude;
excludesParam.append(exclude);
if (i < numExcludes - 1) {
excludesParam.append(",");
}
}
String excludesParam = randomFields(excludes);
if (numExcludes > 0) {
expectedParams.put("_source_exclude", excludesParam.toString());
expectedParams.put("_source_exclude", excludesParam);
}
consumer.accept(new FetchSourceContext(true, includes, excludes));
}
@@ -1042,18 +1020,24 @@ public class RequestTests extends ESTestCase {
}
}

private static void setRandomWaitForActiveShards(Consumer<Integer> setter, Map<String, String> expectedParams) {
private static void setRandomWaitForActiveShards(Consumer<ActiveShardCount> setter, Map<String, String> expectedParams) {
if (randomBoolean()) {
int waitForActiveShards = randomIntBetween(0, 10);
setter.accept(waitForActiveShards);
expectedParams.put("wait_for_active_shards", String.valueOf(waitForActiveShards));
String waitForActiveShardsString;
int waitForActiveShards = randomIntBetween(-1, 5);
if (waitForActiveShards == -1) {
waitForActiveShardsString = "all";
} else {
waitForActiveShardsString = String.valueOf(waitForActiveShards);
}
setter.accept(ActiveShardCount.parseString(waitForActiveShardsString));
expectedParams.put("wait_for_active_shards", waitForActiveShardsString);
}
}

private static void setRandomRefreshPolicy(ReplicatedWriteRequest<?> request, Map<String, String> expectedParams) {
private static void setRandomRefreshPolicy(Consumer<WriteRequest.RefreshPolicy> setter, Map<String, String> expectedParams) {
if (randomBoolean()) {
WriteRequest.RefreshPolicy refreshPolicy = randomFrom(WriteRequest.RefreshPolicy.values());
request.setRefreshPolicy(refreshPolicy);
setter.accept(refreshPolicy);
if (refreshPolicy != WriteRequest.RefreshPolicy.NONE) {
expectedParams.put("refresh", refreshPolicy.getValue());
}

@@ -1070,13 +1054,26 @@ public class RequestTests extends ESTestCase {
}
}

private static void setRandomVersionType(DocWriteRequest<?> request, Map<String, String> expectedParams) {
private static void setRandomVersionType(Consumer<VersionType> setter, Map<String, String> expectedParams) {
if (randomBoolean()) {
VersionType versionType = randomFrom(VersionType.values());
request.versionType(versionType);
setter.accept(versionType);
if (versionType != VersionType.INTERNAL) {
expectedParams.put("version_type", versionType.name().toLowerCase(Locale.ROOT));
}
}
}

private static String randomFields(String[] fields) {
StringBuilder excludesParam = new StringBuilder();
for (int i = 0; i < fields.length; i++) {
String exclude = randomAlphaOfLengthBetween(3, 10);
fields[i] = exclude;
excludesParam.append(exclude);
if (i < fields.length - 1) {
excludesParam.append(",");
}
}
return excludesParam.toString();
}
}

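The helper refactoring above replaces request-typed parameters with `Consumer` setters so that one helper serves every request class via method references; a self-contained sketch of the pattern (names are illustrative, not from the commit):

```java
import java.util.function.Consumer;

class ConsumerSetterSketch {
    static class FakeRequest {
        String versionType;
        void versionType(String v) { this.versionType = v; }
    }

    // One helper works for any request type that exposes a compatible setter.
    static void applyVersionType(Consumer<String> setter) {
        setter.accept("external");
    }

    public static void main(String[] args) {
        FakeRequest request = new FakeRequest();
        applyVersionType(request::versionType); // method reference, as in the test helpers above
        System.out.println(request.versionType); // prints "external"
    }
}
```
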
@@ -75,9 +75,6 @@ public class CustomFieldQuery extends FieldQuery {
} else if (sourceQuery instanceof BlendedTermQuery) {
final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery;
flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost);
} else if (sourceQuery instanceof ESToParentBlockJoinQuery) {
ESToParentBlockJoinQuery blockJoinQuery = (ESToParentBlockJoinQuery) sourceQuery;
flatten(blockJoinQuery.getChildQuery(), reader, flatQueries, boost);
} else if (sourceQuery instanceof BoostingQuery) {
BoostingQuery boostingQuery = (BoostingQuery) sourceQuery;
//flatten positive query with query boost

@@ -107,6 +107,8 @@ public class Version implements Comparable<Version> {
public static final Version V_5_6_4 = new Version(V_5_6_4_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
public static final int V_5_6_5_ID = 5060599;
public static final Version V_5_6_5 = new Version(V_5_6_5_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
public static final int V_5_6_6_ID = 5060699;
public static final Version V_5_6_6 = new Version(V_5_6_6_ID, org.apache.lucene.util.Version.LUCENE_6_6_1);
public static final int V_6_0_0_alpha1_ID = 6000001;
public static final Version V_6_0_0_alpha1 =
new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_7_0_0);

@@ -178,6 +180,8 @@ public class Version implements Comparable<Version> {
return V_6_0_0_alpha2;
case V_6_0_0_alpha1_ID:
return V_6_0_0_alpha1;
case V_5_6_6_ID:
return V_5_6_6;
case V_5_6_5_ID:
return V_5_6_5;
case V_5_6_4_ID:

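The numeric IDs above appear to follow Elasticsearch's usual packing of major/minor/revision/build into one integer; a sketch of that assumption:

```java
final class VersionIdSketch {
    // Assumed encoding: major * 1_000_000 + minor * 10_000 + revision * 100 + build.
    static int versionId(int major, int minor, int revision, int build) {
        return major * 1_000_000 + minor * 10_000 + revision * 100 + build;
    }
    // versionId(5, 6, 5, 99) == 5060599 and versionId(5, 6, 6, 99) == 5060699,
    // matching V_5_6_5_ID and V_5_6_6_ID in the hunk above.
}
```
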
@@ -21,15 +21,34 @@ package org.elasticsearch.action.admin.indices.open;

import org.elasticsearch.Version;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;

import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;

/**
* A response for a open index action.
*/
public class OpenIndexResponse extends AcknowledgedResponse {
public class OpenIndexResponse extends AcknowledgedResponse implements ToXContentObject {
private static final String SHARDS_ACKNOWLEDGED = "shards_acknowledged";
private static final ParseField SHARDS_ACKNOWLEDGED_PARSER = new ParseField(SHARDS_ACKNOWLEDGED);

private static final ConstructingObjectParser<OpenIndexResponse, Void> PARSER = new ConstructingObjectParser<>("open_index", true,
args -> new OpenIndexResponse((boolean) args[0], (boolean) args[1]));

static {
declareAcknowledgedField(PARSER);
PARSER.declareField(constructorArg(), (parser, context) -> parser.booleanValue(), SHARDS_ACKNOWLEDGED_PARSER,
ObjectParser.ValueType.BOOLEAN);
}

private boolean shardsAcknowledged;

@@ -68,4 +87,17 @@ public class OpenIndexResponse extends AcknowledgedResponse {
out.writeBoolean(shardsAcknowledged);
}
}

@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
addAcknowledgedField(builder);
builder.field(SHARDS_ACKNOWLEDGED, isShardsAcknowledged());
builder.endObject();
return builder;
}

public static OpenIndexResponse fromXContent(XContentParser parser) throws IOException {
return PARSER.apply(parser, null);
}
}

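Based on the `toXContent` and parser declarations above, the REST body this response serializes to (and that `fromXContent` parses) should look roughly like the string below; this is an inference from the code, not an excerpt from the commit.

```java
// Inferred wire format (sketch):
String exampleBody = "{\"acknowledged\": true, \"shards_acknowledged\": true}";
// fromXContent maps "acknowledged" and "shards_acknowledged" to the two
// constructor arguments declared in the ConstructingObjectParser above.
```
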
@@ -71,7 +71,7 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear
@Override
protected void doExecute(MultiSearchRequest request, ActionListener<MultiSearchResponse> listener) {
final long relativeStartTime = relativeTimeProvider.getAsLong();

ClusterState clusterState = clusterService.state();
clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);

@@ -130,7 +130,7 @@ public class TransportMultiSearchAction extends HandledTransportAction<MultiSear
* of concurrent requests. At first glance, it appears that we should never poll from the queue and not obtain a request given
* that we only poll here no more times than the number of requests. However, this is not the only consumer of this queue as
* earlier requests that have already completed will poll from the queue too and they could complete before later polls are
* invoked here. Thus, it can be the case that we poll here and and the queue was empty.
* invoked here. Thus, it can be the case that we poll here and the queue was empty.
*/
return;
}

@@ -43,6 +43,7 @@ import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.node.InternalSettingsPreparer;

@@ -169,11 +170,12 @@ public abstract class TransportClient extends AbstractClient {
CircuitBreakerService circuitBreakerService = Node.createCircuitBreakerService(settingsModule.getSettings(),
settingsModule.getClusterSettings());
resourcesToClose.add(circuitBreakerService);
BigArrays bigArrays = new BigArrays(settings, circuitBreakerService);
PageCacheRecycler pageCacheRecycler = new PageCacheRecycler(settings);
BigArrays bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService);
resourcesToClose.add(bigArrays);
modules.add(settingsModule);
NetworkModule networkModule = new NetworkModule(settings, true, pluginsService.filterPlugins(NetworkPlugin.class), threadPool,
bigArrays, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, null);
bigArrays, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, null);
final Transport transport = networkModule.getTransportSupplier().get();
final TransportService transportService = new TransportService(settings, transport, threadPool,
networkModule.getTransportInterceptor(),

@@ -27,6 +27,7 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

@@ -58,7 +59,7 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
this.indexRouting = indexRouting;
this.searchRouting = searchRouting;
if (searchRouting != null) {
searchRoutingValues = Collections.unmodifiableSet(Strings.splitStringByCommaToSet(searchRouting));
searchRoutingValues = Collections.unmodifiableSet(Sets.newHashSet(Strings.splitStringByCommaToArray(searchRouting)));
} else {
searchRoutingValues = emptySet();
}

@@ -186,7 +187,7 @@ public class AliasMetaData extends AbstractDiffable<AliasMetaData> {
}
if (in.readBoolean()) {
searchRouting = in.readString();
searchRoutingValues = Collections.unmodifiableSet(Strings.splitStringByCommaToSet(searchRouting));
searchRoutingValues = Collections.unmodifiableSet(Sets.newHashSet(Strings.splitStringByCommaToArray(searchRouting)));
} else {
searchRouting = null;
searchRoutingValues = emptySet();

@@ -31,6 +31,7 @@ import org.elasticsearch.common.joda.DateMathParser;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndexClosedException;

@@ -358,6 +359,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
resolvedExpressions = expressionResolver.resolve(context, resolvedExpressions);
}

// TODO: it appears that this can never be true?
if (isAllIndices(resolvedExpressions)) {
return resolveSearchRoutingAllIndices(state.metaData(), routing);
}

@@ -367,7 +369,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
// List of indices that don't require any routing
Set<String> norouting = new HashSet<>();
if (routing != null) {
paramRouting = Strings.splitStringByCommaToSet(routing);
paramRouting = Sets.newHashSet(Strings.splitStringByCommaToArray(routing));
}

for (String expression : resolvedExpressions) {

@@ -442,9 +444,9 @@ public class IndexNameExpressionResolver extends AbstractComponent {
/**
* Sets the same routing for all indices
*/
private Map<String, Set<String>> resolveSearchRoutingAllIndices(MetaData metaData, String routing) {
public Map<String, Set<String>> resolveSearchRoutingAllIndices(MetaData metaData, String routing) {
if (routing != null) {
Set<String> r = Strings.splitStringByCommaToSet(routing);
Set<String> r = Sets.newHashSet(Strings.splitStringByCommaToArray(routing));
Map<String, Set<String>> routings = new HashMap<>();
String[] concreteIndices = metaData.getConcreteAllIndices();
for (String index : concreteIndices) {

@@ -82,7 +82,6 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
public IndexMetaData upgradeIndexMetaData(IndexMetaData indexMetaData, Version minimumIndexCompatibilityVersion) {
// Throws an exception if there are too-old segments:
if (isUpgraded(indexMetaData)) {
assert indexMetaData == archiveBrokenIndexSettings(indexMetaData) : "all settings must have been upgraded before";
return indexMetaData;
}
checkSupportedVersion(indexMetaData, minimumIndexCompatibilityVersion);

@@ -41,6 +41,7 @@ import java.util.List;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.TreeSet;
import java.util.function.Supplier;

import static java.util.Collections.unmodifiableSet;
import static org.elasticsearch.common.util.set.Sets.newHashSet;

@@ -410,62 +411,27 @@ public class Strings {
return collection.toArray(new String[collection.size()]);
}

public static Set<String> splitStringByCommaToSet(final String s) {
return splitStringToSet(s, ',');
}

public static String[] splitStringByCommaToArray(final String s) {
if (s == null || s.isEmpty()) return Strings.EMPTY_ARRAY;
else return s.split(",");
/**
* Tokenize the specified string by commas to a set, trimming whitespace and ignoring empty tokens.
*
* @param s the string to tokenize
* @return the set of tokens
*/
public static Set<String> tokenizeByCommaToSet(final String s) {
if (s == null) return Collections.emptySet();
return tokenizeToCollection(s, ",", HashSet::new);
}

/**
* A convenience method for splitting a delimited string into
* a set and trimming leading and trailing whitespace from all
* split strings.
* Split the specified string by commas to an array.
*
* @param s the string to split
* @param c the delimiter to split on
* @return the set of split strings
* @return the array of split values
* @see String#split(String)
*/
public static Set<String> splitStringToSet(final String s, final char c) {
if (s == null || s.isEmpty()) {
return Collections.emptySet();
}
final char[] chars = s.toCharArray();
int count = 1;
for (final char x : chars) {
if (x == c) {
count++;
}
}
final Set<String> result = new HashSet<>(count);
final int len = chars.length;
int start = 0; // starting index in chars of the current substring.
int pos = 0; // current index in chars.
int end = 0; // the position of the end of the current token
for (; pos < len; pos++) {
if (chars[pos] == c) {
int size = end - start;
if (size > 0) { // only add non empty strings
result.add(new String(chars, start, size));
}
start = pos + 1;
end = start;
} else if (Character.isWhitespace(chars[pos])) {
if (start == pos) {
// skip over preceding whitespace
start++;
}
} else {
end = pos + 1;
}
}
int size = end - start;
if (size > 0) {
result.add(new String(chars, start, size));
}
return result;
public static String[] splitStringByCommaToArray(final String s) {
if (s == null || s.isEmpty()) return Strings.EMPTY_ARRAY;
else return s.split(",");
}

/**
@@ -499,7 +465,7 @@ public class Strings {
* tokens. A delimiter is always a single character; for multi-character
* delimiters, consider using <code>delimitedListToStringArray</code>
*
* @param str the String to tokenize
* @param s the String to tokenize
* @param delimiters the delimiter characters, assembled as String
* (each of those characters is individually considered as delimiter).
* @return an array of the tokens
@@ -507,48 +473,35 @@ public class Strings {
* @see java.lang.String#trim()
* @see #delimitedListToStringArray
*/
public static String[] tokenizeToStringArray(String str, String delimiters) {
return tokenizeToStringArray(str, delimiters, true, true);
public static String[] tokenizeToStringArray(final String s, final String delimiters) {
return toStringArray(tokenizeToCollection(s, delimiters, ArrayList::new));
}

/**
* Tokenize the given String into a String array via a StringTokenizer.
* <p>The given delimiters string is supposed to consist of any number of
* delimiter characters. Each of those characters can be used to separate
* tokens. A delimiter is always a single character; for multi-character
* delimiters, consider using <code>delimitedListToStringArray</code>
* Tokenizes the specified string to a collection using the specified delimiters as the token delimiters. This method trims whitespace
* from tokens and ignores empty tokens.
*
* @param str the String to tokenize
* @param delimiters the delimiter characters, assembled as String
* (each of those characters is individually considered as delimiter)
* @param trimTokens trim the tokens via String's <code>trim</code>
* @param ignoreEmptyTokens omit empty tokens from the result array
* (only applies to tokens that are empty after trimming; StringTokenizer
* will not consider subsequent delimiters as token in the first place).
* @return an array of the tokens (<code>null</code> if the input String
* was <code>null</code>)
* @param s the string to tokenize.
* @param delimiters the token delimiters
* @param supplier a collection supplier
* @param <T> the type of the collection
* @return the tokens
* @see java.util.StringTokenizer
* @see java.lang.String#trim()
* @see #delimitedListToStringArray
*/
public static String[] tokenizeToStringArray(
String str, String delimiters, boolean trimTokens, boolean ignoreEmptyTokens) {

if (str == null) {
private static <T extends Collection<String>> T tokenizeToCollection(
final String s, final String delimiters, final Supplier<T> supplier) {
if (s == null) {
return null;
}
StringTokenizer st = new StringTokenizer(str, delimiters);
List<String> tokens = new ArrayList<>();
while (st.hasMoreTokens()) {
String token = st.nextToken();
if (trimTokens) {
token = token.trim();
}
if (!ignoreEmptyTokens || token.length() > 0) {
final StringTokenizer tokenizer = new StringTokenizer(s, delimiters);
final T tokens = supplier.get();
while (tokenizer.hasMoreTokens()) {
final String token = tokenizer.nextToken().trim();
if (token.length() > 0) {
tokens.add(token);
}
}
return toStringArray(tokens);
return tokens;
}

/**

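A small standalone sketch of the behaviour the new `tokenizeByCommaToSet` / `tokenizeToCollection` code above implements: tokens are trimmed and whitespace-only or empty tokens are dropped. It uses only `java.util.StringTokenizer`, mirroring the implementation in the hunk.

```java
import java.util.HashSet;
import java.util.Set;
import java.util.StringTokenizer;

class TokenizeSketch {
    public static void main(String[] args) {
        Set<String> tokens = new HashSet<>();
        StringTokenizer tokenizer = new StringTokenizer(" a, ,b ,, c ", ",");
        while (tokenizer.hasMoreTokens()) {
            String token = tokenizer.nextToken().trim();
            if (token.length() > 0) {
                tokens.add(token); // whitespace-only and empty tokens are skipped
            }
        }
        System.out.println(tokens); // prints [a, b, c] in some order
    }
}
```
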
@@ -1,98 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.common.lucene;

import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.store.Directory;

import java.io.IOException;
import java.util.Collection;
import java.util.Map;

/**
* A simple delegate that delegates all {@link IndexCommit} calls to a delegated
* {@link IndexCommit}.
*
*
*/
public abstract class IndexCommitDelegate extends IndexCommit {

protected final IndexCommit delegate;

/**
* Constructs a new {@link IndexCommit} that will delegate all calls
* to the provided delegate.
*
* @param delegate The delegate
*/
public IndexCommitDelegate(IndexCommit delegate) {
this.delegate = delegate;
}

@Override
public String getSegmentsFileName() {
return delegate.getSegmentsFileName();
}

@Override
public Collection<String> getFileNames() throws IOException {
return delegate.getFileNames();
}

@Override
public Directory getDirectory() {
return delegate.getDirectory();
}

@Override
public void delete() {
delegate.delete();
}

@Override
public boolean isDeleted() {
return delegate.isDeleted();
}

@Override
public int getSegmentCount() {
return delegate.getSegmentCount();
}

@Override
public boolean equals(Object other) {
return delegate.equals(other);
}

@Override
public int hashCode() {
return delegate.hashCode();
}

@Override
public long getGeneration() {
return delegate.getGeneration();
}

@Override
public Map<String, String> getUserData() throws IOException {
return delegate.getUserData();
}
}

@@ -34,6 +34,7 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.http.HttpServerTransport;

@@ -107,6 +108,7 @@ public final class NetworkModule {
*/
public NetworkModule(Settings settings, boolean transportClient, List<NetworkPlugin> plugins, ThreadPool threadPool,
BigArrays bigArrays,
PageCacheRecycler pageCacheRecycler,
CircuitBreakerService circuitBreakerService,
NamedWriteableRegistry namedWriteableRegistry,
NamedXContentRegistry xContentRegistry,

@@ -121,9 +123,9 @@ public final class NetworkModule {
registerHttpTransport(entry.getKey(), entry.getValue());
}
}
Map<String, Supplier<Transport>> httpTransportFactory = plugin.getTransports(settings, threadPool, bigArrays,
Map<String, Supplier<Transport>> transportFactory = plugin.getTransports(settings, threadPool, bigArrays, pageCacheRecycler,
circuitBreakerService, namedWriteableRegistry, networkService);
for (Map.Entry<String, Supplier<Transport>> entry : httpTransportFactory.entrySet()) {
for (Map.Entry<String, Supplier<Transport>> entry : transportFactory.entrySet()) {
registerTransport(entry.getKey(), entry.getValue());
}
List<TransportInterceptor> transportInterceptors = plugin.getTransportInterceptors(namedWriteableRegistry,

@@ -264,17 +264,41 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
}

/**
* Validates that all given settings are registered and valid
* @param settings the settings to validate
* @param validateDependencies if <code>true</code> settings dependencies are validated as well.
* Validates that all settings are registered and valid.
*
* @param settings the settings to validate
* @param validateDependencies true if dependent settings should be validated
* @see Setting#getSettingsDependencies(String)
*/
public final void validate(Settings settings, boolean validateDependencies) {
List<RuntimeException> exceptions = new ArrayList<>();
for (String key : settings.keySet()) { // settings iterate in deterministic fashion
public final void validate(final Settings settings, final boolean validateDependencies) {
validate(settings, validateDependencies, false, false);
}

/**
* Validates that all settings are registered and valid.
*
* @param settings the settings
* @param validateDependencies true if dependent settings should be validated
* @param ignorePrivateSettings true if private settings should be ignored during validation
* @param ignoreArchivedSettings true if archived settings should be ignored during validation
* @see Setting#getSettingsDependencies(String)
*/
public final void validate(
final Settings settings,
final boolean validateDependencies,
final boolean ignorePrivateSettings,
final boolean ignoreArchivedSettings) {
final List<RuntimeException> exceptions = new ArrayList<>();
for (final String key : settings.keySet()) { // settings iterate in deterministic fashion
if (isPrivateSetting(key) && ignorePrivateSettings) {
continue;
}
if (key.startsWith(ARCHIVED_SETTINGS_PREFIX) && ignoreArchivedSettings) {
continue;
}
try {
validate(key, settings, validateDependencies);
} catch (RuntimeException ex) {
} catch (final RuntimeException ex) {
exceptions.add(ex);
}
}

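A hedged sketch of input that the new four-argument `validate` overload above could be asked to skip; the setting keys are illustrative, and the call at the end is commented out because it needs a concrete `AbstractScopedSettings` subclass instance.

```java
final Settings toValidate = Settings.builder()
        .put("archived.some.old.setting", "value") // skipped when ignoreArchivedSettings == true
        .put("index.number_of_replicas", 1)        // still validated normally
        .build();
// scopedSettings.validate(toValidate, true, false, true);
```
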
@@ -169,8 +169,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {

)));

public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY,
BUILT_IN_INDEX_SETTINGS);
public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, BUILT_IN_INDEX_SETTINGS);

public IndexScopedSettings(Settings settings, Set<Setting<?>> settingsSet) {
super(settings, settingsSet, Property.IndexScope);

@@ -372,9 +372,9 @@ public class BigArrays implements Releasable {
final boolean checkBreaker;
private final BigArrays circuitBreakingInstance;

public BigArrays(Settings settings, @Nullable final CircuitBreakerService breakerService) {
public BigArrays(PageCacheRecycler recycler, @Nullable final CircuitBreakerService breakerService) {
// Checking the breaker is disabled if not specified
this(new PageCacheRecycler(settings), breakerService, false);
this(recycler, breakerService, false);
}

// public for tests

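A sketch of the new wiring implied by the constructor change above: the caller now builds the `PageCacheRecycler` itself and hands it to `BigArrays` (a null breaker service is permitted per the `@Nullable` parameter). This is an illustration assuming default settings, not code from the commit.

```java
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.PageCacheRecycler;

class BigArraysWiringSketch {
    static BigArrays newBigArrays(Settings settings) {
        PageCacheRecycler recycler = new PageCacheRecycler(settings); // constructor made public in the next hunk
        return new BigArrays(recycler, null); // breaker service may be null
    }
}
```
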
@@ -65,10 +65,10 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable {
Releasables.close(true, bytePage, intPage, longPage, objectPage);
}

protected PageCacheRecycler(Settings settings) {
public PageCacheRecycler(Settings settings) {
super(settings);
final Type type = TYPE_SETTING .get(settings);
final long limit = LIMIT_HEAP_SETTING .get(settings).getBytes();
final Type type = TYPE_SETTING.get(settings);
final long limit = LIMIT_HEAP_SETTING.get(settings).getBytes();
final int availableProcessors = EsExecutors.numberOfProcessors(settings);

// We have a global amount of memory that we need to divide across data types.

@@ -137,7 +137,7 @@ public class PrioritizedEsThreadPoolExecutor extends EsThreadPoolExecutor {
@Override
protected Runnable wrapRunnable(Runnable command) {
if (command instanceof PrioritizedRunnable) {
if ((command instanceof TieBreakingPrioritizedRunnable)) {
if (command instanceof TieBreakingPrioritizedRunnable) {
return command;
}
Priority priority = ((PrioritizedRunnable) command).priority();

@@ -145,9 +145,6 @@ public class PrioritizedEsThreadPoolExecutor extends EsThreadPoolExecutor {
} else if (command instanceof PrioritizedFutureTask) {
return command;
} else { // it might be a callable wrapper...
if (command instanceof TieBreakingPrioritizedRunnable) {
return command;
}
return new TieBreakingPrioritizedRunnable(super.wrapRunnable(command), Priority.NORMAL, insertionOrder.incrementAndGet());
}
}

@@ -26,8 +26,10 @@ import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.transport.PortsRange;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;

import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;

import static java.util.Collections.emptyList;

@@ -93,6 +95,9 @@ public final class HttpTransportSettings {
public static final Setting<Boolean> SETTING_HTTP_RESET_COOKIES =
Setting.boolSetting("http.reset_cookies", false, Property.NodeScope);

public static final Setting<TimeValue> SETTING_HTTP_READ_TIMEOUT =
Setting.timeSetting("http.read_timeout", new TimeValue(30, TimeUnit.SECONDS), new TimeValue(0), Property.NodeScope);

public static final Setting<Boolean> SETTING_HTTP_TCP_NO_DELAY =
boolSetting("http.tcp_no_delay", NetworkService.TCP_NO_DELAY, Setting.Property.NodeScope);
public static final Setting<Boolean> SETTING_HTTP_TCP_KEEP_ALIVE =

@@ -180,6 +180,11 @@ final class DocumentParser {
String[] parts = fullFieldPath.split("\\.");
for (String part : parts) {
if (Strings.hasText(part) == false) {
// check if the field name contains only whitespace
if (Strings.isEmpty(part) == false) {
throw new IllegalArgumentException(
"object field cannot contain only whitespace: ['" + fullFieldPath + "']");
}
throw new IllegalArgumentException(
"object field starting or ending with a [.] makes object resolution ambiguous: [" + fullFieldPath + "]");
}
@@ -341,11 +341,9 @@ public class GeoShapeQueryBuilder extends AbstractQueryBuilder<GeoShapeQueryBuil
} else {
throw new QueryShardException(context, "failed to find geo_shape field [" + fieldName + "]");
}
}

// TODO: This isn't the nicest way to check this
if (!(fieldType instanceof GeoShapeFieldMapper.GeoShapeFieldType)) {
throw new QueryShardException(context, "Field [" + fieldName + "] is not a geo_shape");
} else if (fieldType.typeName().equals(GeoShapeFieldMapper.CONTENT_TYPE) == false) {
throw new QueryShardException(context,
"Field [" + fieldName + "] is not of type [geo_shape] but of type [" + fieldType.typeName() + "]");
}

final GeoShapeFieldMapper.GeoShapeFieldType shapeFieldType = (GeoShapeFieldMapper.GeoShapeFieldType) fieldType;
@@ -1,145 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.shard;
|
||||
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.index.store.StoreFileMetaData;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
public class CommitPoint {
|
||||
|
||||
public static final CommitPoint NULL = new CommitPoint(-1, "_null_", Type.GENERATED, Collections.<CommitPoint.FileInfo>emptyList(), Collections.<CommitPoint.FileInfo>emptyList());
|
||||
|
||||
public static class FileInfo {
|
||||
private final String name;
|
||||
private final String physicalName;
|
||||
private final long length;
|
||||
private final String checksum;
|
||||
|
||||
public FileInfo(String name, String physicalName, long length, String checksum) {
|
||||
this.name = name;
|
||||
this.physicalName = physicalName;
|
||||
this.length = length;
|
||||
this.checksum = checksum;
|
||||
}
|
||||
|
||||
public String name() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public String physicalName() {
|
||||
return this.physicalName;
|
||||
}
|
||||
|
||||
public long length() {
|
||||
return length;
|
||||
}
|
||||
|
||||
@Nullable
|
||||
public String checksum() {
|
||||
return checksum;
|
||||
}
|
||||
}
|
||||
|
||||
public enum Type {
|
||||
GENERATED,
|
||||
SAVED
|
||||
}
|
||||
|
||||
private final long version;
|
||||
|
||||
private final String name;
|
||||
|
||||
private final Type type;
|
||||
|
||||
private final List<FileInfo> indexFiles;
|
||||
|
||||
private final List<FileInfo> translogFiles;
|
||||
|
||||
public CommitPoint(long version, String name, Type type, List<FileInfo> indexFiles, List<FileInfo> translogFiles) {
|
||||
this.version = version;
|
||||
this.name = name;
|
||||
this.type = type;
|
||||
this.indexFiles = Collections.unmodifiableList(new ArrayList<>(indexFiles));
|
||||
this.translogFiles = Collections.unmodifiableList(new ArrayList<>(translogFiles));
|
||||
}
|
||||
|
||||
public long version() {
|
||||
return version;
|
||||
}
|
||||
|
||||
public String name() {
|
||||
return this.name;
|
||||
}
|
||||
|
||||
public Type type() {
|
||||
return this.type;
|
||||
}
|
||||
|
||||
public List<FileInfo> indexFiles() {
|
||||
return this.indexFiles;
|
||||
}
|
||||
|
||||
public List<FileInfo> translogFiles() {
|
||||
return this.translogFiles;
|
||||
}
|
||||
|
||||
public boolean containPhysicalIndexFile(String physicalName) {
|
||||
return findPhysicalIndexFile(physicalName) != null;
|
||||
}
|
||||
|
||||
public CommitPoint.FileInfo findPhysicalIndexFile(String physicalName) {
|
||||
for (FileInfo file : indexFiles) {
|
||||
if (file.physicalName().equals(physicalName)) {
|
||||
return file;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
public CommitPoint.FileInfo findNameFile(String name) {
|
||||
CommitPoint.FileInfo fileInfo = findNameIndexFile(name);
|
||||
if (fileInfo != null) {
|
||||
return fileInfo;
|
||||
}
|
||||
return findNameTranslogFile(name);
|
||||
}
|
||||
|
||||
public CommitPoint.FileInfo findNameIndexFile(String name) {
|
||||
for (FileInfo file : indexFiles) {
|
||||
if (file.name().equals(name)) {
|
||||
return file;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
public CommitPoint.FileInfo findNameTranslogFile(String name) {
|
||||
for (FileInfo file : translogFiles) {
|
||||
if (file.name().equals(name)) {
|
||||
return file;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
}
|
|
@@ -1,201 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.shard;
|
||||
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
|
||||
public class CommitPoints implements Iterable<CommitPoint> {
|
||||
|
||||
private final List<CommitPoint> commitPoints;
|
||||
|
||||
public CommitPoints(List<CommitPoint> commitPoints) {
|
||||
CollectionUtil.introSort(commitPoints, new Comparator<CommitPoint>() {
|
||||
@Override
|
||||
public int compare(CommitPoint o1, CommitPoint o2) {
|
||||
return (o2.version() < o1.version() ? -1 : (o2.version() == o1.version() ? 0 : 1));
|
||||
}
|
||||
});
|
||||
this.commitPoints = Collections.unmodifiableList(new ArrayList<>(commitPoints));
|
||||
}
|
||||
|
||||
public List<CommitPoint> commits() {
|
||||
return this.commitPoints;
|
||||
}
|
||||
|
||||
public boolean hasVersion(long version) {
|
||||
for (CommitPoint commitPoint : commitPoints) {
|
||||
if (commitPoint.version() == version) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
public CommitPoint.FileInfo findPhysicalIndexFile(String physicalName) {
|
||||
for (CommitPoint commitPoint : commitPoints) {
|
||||
CommitPoint.FileInfo fileInfo = commitPoint.findPhysicalIndexFile(physicalName);
|
||||
if (fileInfo != null) {
|
||||
return fileInfo;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
public CommitPoint.FileInfo findNameFile(String name) {
|
||||
for (CommitPoint commitPoint : commitPoints) {
|
||||
CommitPoint.FileInfo fileInfo = commitPoint.findNameFile(name);
|
||||
if (fileInfo != null) {
|
||||
return fileInfo;
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
@Override
|
||||
public Iterator<CommitPoint> iterator() {
|
||||
return commitPoints.iterator();
|
||||
}
|
||||
|
||||
public static byte[] toXContent(CommitPoint commitPoint) throws Exception {
|
||||
XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint();
|
||||
builder.startObject();
|
||||
builder.field("version", commitPoint.version());
|
||||
builder.field("name", commitPoint.name());
|
||||
builder.field("type", commitPoint.type().toString());
|
||||
|
||||
builder.startObject("index_files");
|
||||
for (CommitPoint.FileInfo fileInfo : commitPoint.indexFiles()) {
|
||||
builder.startObject(fileInfo.name());
|
||||
builder.field("physical_name", fileInfo.physicalName());
|
||||
builder.field("length", fileInfo.length());
|
||||
if (fileInfo.checksum() != null) {
|
||||
builder.field("checksum", fileInfo.checksum());
|
||||
}
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
|
||||
builder.startObject("translog_files");
|
||||
for (CommitPoint.FileInfo fileInfo : commitPoint.translogFiles()) {
|
||||
builder.startObject(fileInfo.name());
|
||||
builder.field("physical_name", fileInfo.physicalName());
|
||||
builder.field("length", fileInfo.length());
|
||||
builder.endObject();
|
||||
}
|
||||
builder.endObject();
|
||||
|
||||
builder.endObject();
|
||||
return BytesReference.toBytes(builder.bytes());
|
||||
}
|
||||
|
||||
public static CommitPoint fromXContent(byte[] data) throws Exception {
|
||||
// EMPTY is safe here because we never call namedObject
|
||||
try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(NamedXContentRegistry.EMPTY, data)) {
|
||||
String currentFieldName = null;
|
||||
XContentParser.Token token = parser.nextToken();
|
||||
if (token == null) {
|
||||
// no data...
|
||||
throw new IOException("No commit point data");
|
||||
}
|
||||
long version = -1;
|
||||
String name = null;
|
||||
CommitPoint.Type type = null;
|
||||
List<CommitPoint.FileInfo> indexFiles = new ArrayList<>();
|
||||
List<CommitPoint.FileInfo> translogFiles = new ArrayList<>();
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (token == XContentParser.Token.START_OBJECT) {
|
||||
List<CommitPoint.FileInfo> files = null;
|
||||
if ("index_files".equals(currentFieldName) || "indexFiles".equals(currentFieldName)) {
|
||||
files = indexFiles;
|
||||
} else if ("translog_files".equals(currentFieldName) || "translogFiles".equals(currentFieldName)) {
|
||||
files = translogFiles;
|
||||
} else {
|
||||
throw new IOException("Can't handle object with name [" + currentFieldName + "]");
|
||||
}
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (token == XContentParser.Token.START_OBJECT) {
|
||||
String fileName = currentFieldName;
|
||||
String physicalName = null;
|
||||
long size = -1;
|
||||
String checksum = null;
|
||||
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
|
||||
if (token == XContentParser.Token.FIELD_NAME) {
|
||||
currentFieldName = parser.currentName();
|
||||
} else if (token.isValue()) {
|
||||
if ("physical_name".equals(currentFieldName) || "physicalName".equals(currentFieldName)) {
|
||||
physicalName = parser.text();
|
||||
} else if ("length".equals(currentFieldName)) {
|
||||
size = parser.longValue();
|
||||
} else if ("checksum".equals(currentFieldName)) {
|
||||
checksum = parser.text();
|
||||
}
|
||||
}
|
||||
}
|
||||
if (physicalName == null) {
|
||||
throw new IOException("Malformed commit, missing physical_name for [" + fileName + "]");
|
||||
}
|
||||
if (size == -1) {
|
||||
throw new IOException("Malformed commit, missing length for [" + fileName + "]");
|
||||
}
|
||||
files.add(new CommitPoint.FileInfo(fileName, physicalName, size, checksum));
|
||||
}
|
||||
}
|
||||
} else if (token.isValue()) {
|
||||
if ("version".equals(currentFieldName)) {
|
||||
version = parser.longValue();
|
||||
} else if ("name".equals(currentFieldName)) {
|
||||
name = parser.text();
|
||||
} else if ("type".equals(currentFieldName)) {
|
||||
type = CommitPoint.Type.valueOf(parser.text());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (version == -1) {
|
||||
throw new IOException("Malformed commit, missing version");
|
||||
}
|
||||
if (name == null) {
|
||||
throw new IOException("Malformed commit, missing name");
|
||||
}
|
||||
if (type == null) {
|
||||
throw new IOException("Malformed commit, missing type");
|
||||
}
|
||||
|
||||
return new CommitPoint(version, name, type, indexFiles, translogFiles);
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -152,7 +152,7 @@ public class IndicesService extends AbstractLifecycleComponent
private final TimeValue shardsClosedTimeout;
private final AnalysisRegistry analysisRegistry;
private final IndexNameExpressionResolver indexNameExpressionResolver;
private final IndexScopedSettings indexScopeSetting;
private final IndexScopedSettings indexScopedSettings;
private final IndicesFieldDataCache indicesFieldDataCache;
private final CacheCleaner cacheCleaner;
private final ThreadPool threadPool;

@@ -198,7 +198,7 @@ public class IndicesService extends AbstractLifecycleComponent
indexingMemoryController = new IndexingMemoryController(settings, threadPool,
// ensure we pull an iter with new shards - flatten makes a copy
() -> Iterables.flatten(this).iterator());
this.indexScopeSetting = indexScopedSettings;
this.indexScopedSettings = indexScopedSettings;
this.circuitBreakerService = circuitBreakerService;
this.bigArrays = bigArrays;
this.scriptService = scriptService;

@@ -432,7 +432,9 @@ public class IndicesService extends AbstractLifecycleComponent
IndicesFieldDataCache indicesFieldDataCache,
List<IndexEventListener> builtInListeners,
IndexingOperationListener... indexingOperationListeners) throws IOException {
final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexScopeSetting);
final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexScopedSettings);
// we ignore private settings since they are not registered settings
indexScopedSettings.validate(indexMetaData.getSettings(), true, true, true);
logger.debug("creating Index [{}], shards [{}]/[{}] - reason [{}]",
indexMetaData.getIndex(),
idxSettings.getNumberOfShards(),

@@ -470,7 +472,7 @@ public class IndicesService extends AbstractLifecycleComponent
* Note: the returned {@link MapperService} should be closed when unneeded.
*/
public synchronized MapperService createIndexMapperService(IndexMetaData indexMetaData) throws IOException {
final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexScopeSetting);
final IndexSettings idxSettings = new IndexSettings(indexMetaData, this.settings, indexScopedSettings);
final IndexModule indexModule = new IndexModule(idxSettings, analysisRegistry);
pluginsService.onIndexModule(indexModule);
return indexModule.newIndexMapperService(xContentRegistry, mapperRegistry, scriptService);

@@ -1268,7 +1270,7 @@ public class IndicesService extends AbstractLifecycleComponent
/**
* Returns a function which given an index name, returns a predicate which fields must match in order to be returned by get mappings,
* get index, get field mappings and field capabilities API. Useful to filter the fields that such API return.
* The predicate receives the the field name as input argument. In case multiple plugins register a field filter through
* The predicate receives the field name as input argument. In case multiple plugins register a field filter through
* {@link org.elasticsearch.plugins.MapperPlugin#getFieldFilter()}, only fields that match all the registered filters will be
* returned by get mappings, get index, get field mappings and field capabilities API.
*/
@@ -80,6 +80,7 @@ import org.elasticsearch.common.transport.BoundTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoveryModule;

@@ -363,7 +364,8 @@ public class Node implements Closeable {
modules.add(new GatewayModule());

BigArrays bigArrays = createBigArrays(settings, circuitBreakerService);
PageCacheRecycler pageCacheRecycler = createPageCacheRecycler(settings);
BigArrays bigArrays = createBigArrays(pageCacheRecycler, circuitBreakerService);
resourcesToClose.add(bigArrays);
modules.add(settingsModule);
List<NamedWriteableRegistry.Entry> namedWriteables = Stream.of(

@@ -403,7 +405,8 @@ public class Node implements Closeable {

final RestController restController = actionModule.getRestController();
final NetworkModule networkModule = new NetworkModule(settings, false, pluginsService.filterPlugins(NetworkPlugin.class),
threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, xContentRegistry, networkService, restController);
threadPool, bigArrays, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, xContentRegistry,
networkService, restController);
Collection<UnaryOperator<Map<String, MetaData.Custom>>> customMetaDataUpgraders =
pluginsService.filterPlugins(Plugin.class).stream()
.map(Plugin::getCustomMetaDataUpgrader)

@@ -898,8 +901,16 @@ public class Node implements Closeable {
* Creates a new {@link BigArrays} instance used for this node.
* This method can be overwritten by subclasses to change their {@link BigArrays} implementation for instance for testing
*/
BigArrays createBigArrays(Settings settings, CircuitBreakerService circuitBreakerService) {
return new BigArrays(settings, circuitBreakerService);
BigArrays createBigArrays(PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService) {
return new BigArrays(pageCacheRecycler, circuitBreakerService);
}

/**
* Creates a new {@link BigArrays} instance used for this node.
* This method can be overwritten by subclasses to change their {@link BigArrays} implementation for instance for testing
*/
PageCacheRecycler createPageCacheRecycler(Settings settings) {
return new PageCacheRecycler(settings);
}

/**
@@ -27,6 +27,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.http.HttpServerTransport;

@@ -58,6 +59,7 @@ public interface NetworkPlugin {
* See {@link org.elasticsearch.common.network.NetworkModule#TRANSPORT_TYPE_KEY} to configure a specific implementation.
*/
default Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
PageCacheRecycler pageCacheRecycler,
CircuitBreakerService circuitBreakerService,
NamedWriteableRegistry namedWriteableRegistry,
NetworkService networkService) {
@@ -94,7 +94,7 @@ public abstract class AbstractRestChannel implements RestChannel {
Set<String> includes = Collections.emptySet();
Set<String> excludes = Collections.emptySet();
if (useFiltering) {
Set<String> filters = Strings.splitStringByCommaToSet(filterPath);
Set<String> filters = Strings.tokenizeByCommaToSet(filterPath);
includes = filters.stream().filter(INCLUDE_FILTER).collect(toSet());
excludes = filters.stream().filter(EXCLUDE_FILTER).map(f -> f.substring(1)).collect(toSet());
}
@@ -76,7 +76,7 @@ public class RestNodesInfoAction extends BaseRestHandler {
// still, /_nodes/_local (or any other node id) should work and be treated as usual
// this means one must differentiate between allowed metrics and arbitrary node ids in the same place
if (request.hasParam("nodeId") && !request.hasParam("metrics")) {
Set<String> metricsOrNodeIds = Strings.splitStringByCommaToSet(request.param("nodeId", "_all"));
Set<String> metricsOrNodeIds = Strings.tokenizeByCommaToSet(request.param("nodeId", "_all"));
boolean isMetricsOnly = ALLOWED_METRICS.containsAll(metricsOrNodeIds);
if (isMetricsOnly) {
nodeIds = new String[]{"_all"};

@@ -87,7 +87,7 @@ public class RestNodesInfoAction extends BaseRestHandler {
}
} else {
nodeIds = Strings.splitStringByCommaToArray(request.param("nodeId", "_all"));
metrics = Strings.splitStringByCommaToSet(request.param("metrics", "_all"));
metrics = Strings.tokenizeByCommaToSet(request.param("metrics", "_all"));
}

final NodesInfoRequest nodesInfoRequest = new NodesInfoRequest(nodeIds);
@@ -92,7 +92,7 @@ public class RestNodesStatsAction extends BaseRestHandler {
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId"));
Set<String> metrics = Strings.splitStringByCommaToSet(request.param("metric", "_all"));
Set<String> metrics = Strings.tokenizeByCommaToSet(request.param("metric", "_all"));

NodesStatsRequest nodesStatsRequest = new NodesStatsRequest(nodesIds);
nodesStatsRequest.timeout(request.param("timeout"));

@@ -134,7 +134,7 @@ public class RestNodesStatsAction extends BaseRestHandler {

// check for index specific metrics
if (metrics.contains("indices")) {
Set<String> indexMetrics = Strings.splitStringByCommaToSet(request.param("index_metric", "_all"));
Set<String> indexMetrics = Strings.tokenizeByCommaToSet(request.param("index_metric", "_all"));
if (indexMetrics.size() == 1 && indexMetrics.contains("_all")) {
nodesStatsRequest.indices(CommonStatsFlags.ALL);
} else {
@@ -56,7 +56,7 @@ public class RestNodesUsageAction extends BaseRestHandler {
@Override
protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException {
String[] nodesIds = Strings.splitStringByCommaToArray(request.param("nodeId"));
Set<String> metrics = Strings.splitStringByCommaToSet(request.param("metric", "_all"));
Set<String> metrics = Strings.tokenizeByCommaToSet(request.param("metric", "_all"));

NodesUsageRequest nodesUsageRequest = new NodesUsageRequest(nodesIds);
nodesUsageRequest.timeout(request.param("timeout"));
@@ -91,7 +91,7 @@ public class RestIndicesStatsAction extends BaseRestHandler {
indicesStatsRequest.indices(Strings.splitStringByCommaToArray(request.param("index")));
indicesStatsRequest.types(Strings.splitStringByCommaToArray(request.param("types")));

Set<String> metrics = Strings.splitStringByCommaToSet(request.param("metric", "_all"));
Set<String> metrics = Strings.tokenizeByCommaToSet(request.param("metric", "_all"));
// short cut, if no metrics have been specified in URI
if (metrics.size() == 1 && metrics.contains("_all")) {
indicesStatsRequest.all();
@@ -464,19 +464,13 @@ public final class DirectCandidateGeneratorBuilder implements CandidateGenerator
}

static StringDistance resolveDistance(String distanceVal) {
distanceVal = distanceVal.toLowerCase(Locale.US);
distanceVal = distanceVal.toLowerCase(Locale.ROOT);
if ("internal".equals(distanceVal)) {
return DirectSpellChecker.INTERNAL_LEVENSHTEIN;
} else if ("damerau_levenshtein".equals(distanceVal) || "damerauLevenshtein".equals(distanceVal)) {
} else if ("damerau_levenshtein".equals(distanceVal)) {
return new LuceneLevenshteinDistance();
} else if ("levenstein".equals(distanceVal)) {
DEPRECATION_LOGGER.deprecated("Deprecated distance [levenstein] used, replaced by [levenshtein]");
return new LevensteinDistance();
} else if ("levenshtein".equals(distanceVal)) {
return new LevensteinDistance();
} else if ("jarowinkler".equals(distanceVal)) {
DEPRECATION_LOGGER.deprecated("Deprecated distance [jarowinkler] used, replaced by [jaro_winkler]");
return new JaroWinklerDistance();
} else if ("jaro_winkler".equals(distanceVal)) {
return new JaroWinklerDistance();
} else if ("ngram".equals(distanceVal)) {
@@ -581,23 +581,16 @@ public class TermSuggestionBuilder extends SuggestionBuilder<TermSuggestionBuild

public static StringDistanceImpl resolve(final String str) {
Objects.requireNonNull(str, "Input string is null");
final String distanceVal = str.toLowerCase(Locale.US);
final String distanceVal = str.toLowerCase(Locale.ROOT);
switch (distanceVal) {
case "internal":
return INTERNAL;
case "damerau_levenshtein":
case "damerauLevenshtein":
return DAMERAU_LEVENSHTEIN;
case "levenstein":
DEPRECATION_LOGGER.deprecated("Deprecated distance [levenstein] used, replaced by [levenshtein]");
return LEVENSHTEIN;
case "levenshtein":
return LEVENSHTEIN;
case "ngram":
return NGRAM;
case "jarowinkler":
DEPRECATION_LOGGER.deprecated("Deprecated distance [jarowinkler] used, replaced by [jaro_winkler]");
return JARO_WINKLER;
case "jaro_winkler":
return JARO_WINKLER;
default: throw new IllegalArgumentException("Illegal distance option " + str);
@ -195,22 +195,22 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
protected final NetworkService networkService;
|
||||
protected final Set<ProfileSettings> profileSettings;
|
||||
|
||||
protected volatile TransportService transportService;
|
||||
// node id to actual channel
|
||||
protected final ConcurrentMap<DiscoveryNode, NodeChannels> connectedNodes = newConcurrentMap();
|
||||
private volatile TransportService transportService;
|
||||
|
||||
protected final ConcurrentMap<String, BoundTransportAddress> profileBoundAddresses = newConcurrentMap();
|
||||
private final ConcurrentMap<String, BoundTransportAddress> profileBoundAddresses = newConcurrentMap();
|
||||
// node id to actual channel
|
||||
private final ConcurrentMap<DiscoveryNode, NodeChannels> connectedNodes = newConcurrentMap();
|
||||
private final Map<String, List<TcpChannel>> serverChannels = newConcurrentMap();
|
||||
private final Set<TcpChannel> acceptedChannels = Collections.newSetFromMap(new ConcurrentHashMap<>());
|
||||
|
||||
protected final KeyedLock<String> connectionLock = new KeyedLock<>();
|
||||
private final KeyedLock<String> connectionLock = new KeyedLock<>();
|
||||
private final NamedWriteableRegistry namedWriteableRegistry;
|
||||
|
||||
// this lock is here to make sure we close this transport and disconnect all the client nodes
|
||||
// connections while no connect operations is going on... (this might help with 100% CPU when stopping the transport?)
|
||||
protected final ReadWriteLock closeLock = new ReentrantReadWriteLock();
|
||||
private final ReadWriteLock closeLock = new ReentrantReadWriteLock();
|
||||
protected final boolean compress;
|
||||
protected volatile BoundTransportAddress boundAddress;
|
||||
private volatile BoundTransportAddress boundAddress;
|
||||
private final String transportName;
|
||||
protected final ConnectionProfile defaultConnectionProfile;
|
||||
|
||||
|
@ -438,7 +438,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
}
|
||||
|
||||
@Override
|
||||
public void close() throws IOException {
|
||||
public void close() {
|
||||
if (closed.compareAndSet(false, true)) {
|
||||
try {
|
||||
if (lifecycle.stopped()) {
|
||||
|
@ -582,7 +582,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
}
|
||||
|
||||
@Override
|
||||
public final NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connectionProfile) throws IOException {
|
||||
public final NodeChannels openConnection(DiscoveryNode node, ConnectionProfile connectionProfile) {
|
||||
if (node == null) {
|
||||
throw new ConnectTransportException(null, "can't open connection to a null node");
|
||||
}
|
||||
|
@ -602,6 +602,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
PlainActionFuture<Void> connectFuture = PlainActionFuture.newFuture();
|
||||
connectionFutures.add(connectFuture);
|
||||
TcpChannel channel = initiateChannel(node, connectionProfile.getConnectTimeout(), connectFuture);
|
||||
logger.trace(() -> new ParameterizedMessage("Tcp transport client channel opened: {}", channel));
|
||||
channels.add(channel);
|
||||
} catch (Exception e) {
|
||||
// If there was an exception when attempting to instantiate the raw channels, we close all of the channels
|
||||
|
@ -1041,6 +1042,7 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
boolean addedOnThisCall = acceptedChannels.add(channel);
|
||||
assert addedOnThisCall : "Channel should only be added to accept channel set once";
|
||||
channel.addCloseListener(ActionListener.wrap(() -> acceptedChannels.remove(channel)));
|
||||
logger.trace(() -> new ParameterizedMessage("Tcp transport channel accepted: {}", channel));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1738,15 +1740,9 @@ public abstract class TcpTransport extends AbstractLifecycleComponent implements
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns count of currently open connections
|
||||
*/
|
||||
protected abstract long getNumOpenServerConnections();
|
||||
|
||||
@Override
|
||||
public final TransportStats getStats() {
|
||||
return new TransportStats(
|
||||
getNumOpenServerConnections(), readBytesMetric.count(), readBytesMetric.sum(), transmittedBytesMetric.count(),
|
||||
return new TransportStats(acceptedChannels.size(), readBytesMetric.count(), readBytesMetric.sum(), transmittedBytesMetric.count(),
|
||||
transmittedBytesMetric.sum());
|
||||
}
|
||||
|
||||
|
|
|
@ -19,6 +19,8 @@
|
|||
|
||||
package org.elasticsearch.action.admin.indices.create;
|
||||
|
||||
import com.carrotsearch.hppc.cursors.ObjectCursor;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.UnavailableShardsException;
|
||||
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
|
||||
|
@ -29,16 +31,21 @@ import org.elasticsearch.action.support.IndicesOptions;
|
|||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.query.RangeQueryBuilder;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
|
||||
import org.elasticsearch.test.ESIntegTestCase.Scope;
|
||||
import org.elasticsearch.test.InternalTestCluster;
|
||||
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.function.BiFunction;
|
||||
|
@ -51,6 +58,8 @@ import static org.hamcrest.Matchers.allOf;
|
|||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
|
||||
import static org.hamcrest.Matchers.hasToString;
|
||||
import static org.hamcrest.Matchers.instanceOf;
|
||||
import static org.hamcrest.Matchers.lessThanOrEqualTo;
|
||||
import static org.hamcrest.core.IsNull.notNullValue;
|
||||
|
||||
|
@ -344,4 +353,49 @@ public class CreateIndexIT extends ESIntegTestCase {
|
|||
|
||||
assertEquals("Should have index name in response", "foo", response.index());
|
||||
}
|
||||
|
||||
public void testIndexWithUnknownSetting() throws Exception {
|
||||
final int replicas = internalCluster().numDataNodes() - 1;
|
||||
final Settings settings = Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", replicas).build();
|
||||
client().admin().indices().prepareCreate("test").setSettings(settings).get();
|
||||
ensureGreen("test");
|
||||
final ClusterState state = client().admin().cluster().prepareState().get().getState();
|
||||
|
||||
final Set<String> dataOrMasterNodeNames = new HashSet<>();
|
||||
for (final ObjectCursor<DiscoveryNode> node : state.nodes().getMasterAndDataNodes().values()) {
|
||||
assertTrue(dataOrMasterNodeNames.add(node.value.getName()));
|
||||
}
|
||||
|
||||
final IndexMetaData metaData = state.getMetaData().index("test");
|
||||
internalCluster().fullRestart(new InternalTestCluster.RestartCallback() {
|
||||
@Override
|
||||
public Settings onNodeStopped(String nodeName) throws Exception {
|
||||
if (dataOrMasterNodeNames.contains(nodeName)) {
|
||||
final NodeEnvironment nodeEnvironment = internalCluster().getInstance(NodeEnvironment.class, nodeName);
|
||||
final IndexMetaData brokenMetaData =
|
||||
IndexMetaData
|
||||
.builder(metaData)
|
||||
.settings(Settings.builder().put(metaData.getSettings()).put("index.foo", true))
|
||||
.build();
|
||||
// so evil
|
||||
IndexMetaData.FORMAT.write(brokenMetaData, nodeEnvironment.indexPaths(brokenMetaData.getIndex()));
|
||||
}
|
||||
return Settings.EMPTY;
|
||||
}
|
||||
});
|
||||
ensureGreen(metaData.getIndex().getName()); // we have to wait for the index to show up in the metadata or we will fail in a race
|
||||
final ClusterState stateAfterRestart = client().admin().cluster().prepareState().get().getState();
|
||||
|
||||
// the index should not be open after we restart and recover the broken index metadata
|
||||
assertThat(stateAfterRestart.getMetaData().index(metaData.getIndex()).getState(), equalTo(IndexMetaData.State.CLOSE));
|
||||
|
||||
// try to open the index
|
||||
final ElasticsearchException e =
|
||||
expectThrows(ElasticsearchException.class, () -> client().admin().indices().prepareOpen("test").get());
|
||||
assertThat(e, hasToString(containsString("Failed to verify index " + metaData.getIndex())));
|
||||
assertNotNull(e.getCause());
|
||||
assertThat(e.getCause(), instanceOf(IllegalArgumentException.class));
|
||||
assertThat(e, hasToString(containsString("unknown setting [index.foo]")));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -0,0 +1,63 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.action.admin.indices.open;
|
||||
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.elasticsearch.test.XContentTestUtils.insertRandomFields;
|
||||
import static org.hamcrest.CoreMatchers.equalTo;
|
||||
|
||||
public class OpenIndexResponseTests extends ESTestCase {
|
||||
|
||||
public void testFromToXContent() throws IOException {
|
||||
final OpenIndexResponse openIndexResponse = createTestItem();
|
||||
|
||||
boolean humanReadable = randomBoolean();
|
||||
final XContentType xContentType = randomFrom(XContentType.values());
|
||||
BytesReference originalBytes = toShuffledXContent(openIndexResponse, xContentType, ToXContent.EMPTY_PARAMS, humanReadable);
|
||||
BytesReference mutated;
|
||||
if (randomBoolean()) {
|
||||
mutated = insertRandomFields(xContentType, originalBytes, null, random());
|
||||
} else {
|
||||
mutated = originalBytes;
|
||||
}
|
||||
|
||||
OpenIndexResponse parsedOpenIndexResponse;
|
||||
try (XContentParser parser = createParser(xContentType.xContent(), mutated)) {
|
||||
parsedOpenIndexResponse = OpenIndexResponse.fromXContent(parser);
|
||||
assertNull(parser.nextToken());
|
||||
}
|
||||
|
||||
assertThat(parsedOpenIndexResponse.isShardsAcknowledged(), equalTo(openIndexResponse.isShardsAcknowledged()));
|
||||
assertThat(parsedOpenIndexResponse.isAcknowledged(), equalTo(openIndexResponse.isAcknowledged()));
|
||||
}
|
||||
|
||||
private static OpenIndexResponse createTestItem() {
|
||||
boolean acknowledged = randomBoolean();
|
||||
boolean shardsAcked = acknowledged && randomBoolean();
|
||||
return new OpenIndexResponse(acknowledged, shardsAcked);
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,55 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.cluster.metadata;
|
||||
|
||||
import org.elasticsearch.common.io.stream.BytesStreamOutput;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.util.set.Sets;
|
||||
import org.elasticsearch.common.xcontent.XContent;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
public class AliasMetaDataTests extends ESTestCase {
|
||||
|
||||
public void testSerialization() throws IOException {
|
||||
final AliasMetaData before =
|
||||
AliasMetaData
|
||||
.builder("alias")
|
||||
.filter("{ \"term\": \"foo\"}")
|
||||
.indexRouting("indexRouting")
|
||||
.routing("routing")
|
||||
.searchRouting("trim,tw , ltw , lw")
|
||||
.build();
|
||||
|
||||
assertThat(before.searchRoutingValues(), equalTo(Sets.newHashSet("trim", "tw ", " ltw ", " lw")));
|
||||
|
||||
final BytesStreamOutput out = new BytesStreamOutput();
|
||||
before.writeTo(out);
|
||||
|
||||
final StreamInput in = out.bytes().streamInput();
|
||||
final AliasMetaData after = new AliasMetaData(in);
|
||||
|
||||
assertThat(after, equalTo(before));
|
||||
}
|
||||
}
|
|
@ -90,30 +90,15 @@ public class StringsTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testSplitStringToSet() {
|
||||
assertEquals(Strings.splitStringByCommaToSet(null), Sets.newHashSet());
|
||||
assertEquals(Strings.splitStringByCommaToSet(""), Sets.newHashSet());
|
||||
assertEquals(Strings.splitStringByCommaToSet("a,b,c"), Sets.newHashSet("a","b","c"));
|
||||
assertEquals(Strings.splitStringByCommaToSet("a, b, c"), Sets.newHashSet("a","b","c"));
|
||||
assertEquals(Strings.splitStringByCommaToSet(" a , b, c "), Sets.newHashSet("a","b","c"));
|
||||
assertEquals(Strings.splitStringByCommaToSet("aa, bb, cc"), Sets.newHashSet("aa","bb","cc"));
|
||||
assertEquals(Strings.splitStringByCommaToSet(" a "), Sets.newHashSet("a"));
|
||||
assertEquals(Strings.splitStringByCommaToSet(" a "), Sets.newHashSet("a"));
|
||||
assertEquals(Strings.splitStringByCommaToSet(" aa "), Sets.newHashSet("aa"));
|
||||
assertEquals(Strings.splitStringByCommaToSet(" "), Sets.newHashSet());
|
||||
|
||||
assertEquals(Strings.splitStringToSet(null, ' '), Sets.newHashSet());
|
||||
assertEquals(Strings.splitStringToSet("", ' '), Sets.newHashSet());
|
||||
assertEquals(Strings.splitStringToSet("a b c", ' '), Sets.newHashSet("a","b","c"));
|
||||
assertEquals(Strings.splitStringToSet("a, b, c", ' '), Sets.newHashSet("a,","b,","c"));
|
||||
assertEquals(Strings.splitStringToSet(" a b c ", ' '), Sets.newHashSet("a","b","c"));
|
||||
assertEquals(Strings.splitStringToSet(" a b c ", ' '), Sets.newHashSet("a","b","c"));
|
||||
assertEquals(Strings.splitStringToSet("aa bb cc", ' '), Sets.newHashSet("aa","bb","cc"));
|
||||
assertEquals(Strings.splitStringToSet(" a ", ' '), Sets.newHashSet("a"));
|
||||
assertEquals(Strings.splitStringToSet(" a ", ' '), Sets.newHashSet("a"));
|
||||
assertEquals(Strings.splitStringToSet(" a ", ' '), Sets.newHashSet("a"));
|
||||
assertEquals(Strings.splitStringToSet("a ", ' '), Sets.newHashSet("a"));
|
||||
assertEquals(Strings.splitStringToSet(" aa ", ' '), Sets.newHashSet("aa"));
|
||||
assertEquals(Strings.splitStringToSet("aa ", ' '), Sets.newHashSet("aa"));
|
||||
assertEquals(Strings.splitStringToSet(" ", ' '), Sets.newHashSet());
|
||||
assertEquals(Strings.tokenizeByCommaToSet(null), Sets.newHashSet());
|
||||
assertEquals(Strings.tokenizeByCommaToSet(""), Sets.newHashSet());
|
||||
assertEquals(Strings.tokenizeByCommaToSet("a,b,c"), Sets.newHashSet("a","b","c"));
|
||||
assertEquals(Strings.tokenizeByCommaToSet("a, b, c"), Sets.newHashSet("a","b","c"));
|
||||
assertEquals(Strings.tokenizeByCommaToSet(" a , b, c "), Sets.newHashSet("a","b","c"));
|
||||
assertEquals(Strings.tokenizeByCommaToSet("aa, bb, cc"), Sets.newHashSet("aa","bb","cc"));
|
||||
assertEquals(Strings.tokenizeByCommaToSet(" a "), Sets.newHashSet("a"));
|
||||
assertEquals(Strings.tokenizeByCommaToSet(" a "), Sets.newHashSet("a"));
|
||||
assertEquals(Strings.tokenizeByCommaToSet(" aa "), Sets.newHashSet("aa"));
|
||||
assertEquals(Strings.tokenizeByCommaToSet(" "), Sets.newHashSet());
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -21,6 +21,7 @@ package org.elasticsearch.common.io.stream;

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.MockBigArrays;
import org.elasticsearch.common.util.MockPageCacheRecycler;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.test.ESTestCase;

@@ -30,7 +31,7 @@ public class ReleasableBytesStreamOutputTests extends ESTestCase {

public void testRelease() throws Exception {
MockBigArrays mockBigArrays =
new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
try (ReleasableBytesStreamOutput output =
getRandomReleasableBytesStreamOutput(mockBigArrays)) {
output.writeBoolean(randomBoolean());
@ -27,6 +27,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.BoundTransportAddress;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.PageCacheRecycler;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.http.HttpInfo;
|
||||
|
@ -133,6 +134,7 @@ public class NetworkModuleTests extends ModuleTestCase {
|
|||
NetworkPlugin plugin = new NetworkPlugin() {
|
||||
@Override
|
||||
public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
|
||||
PageCacheRecycler pageCacheRecycler,
|
||||
CircuitBreakerService circuitBreakerService,
|
||||
NamedWriteableRegistry namedWriteableRegistry,
|
||||
NetworkService networkService) {
|
||||
|
@ -193,6 +195,7 @@ public class NetworkModuleTests extends ModuleTestCase {
|
|||
NetworkModule module = newNetworkModule(settings, false, new NetworkPlugin() {
|
||||
@Override
|
||||
public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
|
||||
PageCacheRecycler pageCacheRecycler,
|
||||
CircuitBreakerService circuitBreakerService,
|
||||
NamedWriteableRegistry namedWriteableRegistry,
|
||||
NetworkService networkService) {
|
||||
|
@ -227,6 +230,7 @@ public class NetworkModuleTests extends ModuleTestCase {
|
|||
NetworkModule module = newNetworkModule(settings, false, new NetworkPlugin() {
|
||||
@Override
|
||||
public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
|
||||
PageCacheRecycler pageCacheRecycler,
|
||||
CircuitBreakerService circuitBreakerService,
|
||||
NamedWriteableRegistry namedWriteableRegistry,
|
||||
NetworkService networkService) {
|
||||
|
@ -306,7 +310,7 @@ public class NetworkModuleTests extends ModuleTestCase {
|
|||
}
|
||||
|
||||
private NetworkModule newNetworkModule(Settings settings, boolean transportClient, NetworkPlugin... plugins) {
|
||||
return new NetworkModule(settings, transportClient, Arrays.asList(plugins), threadPool, null, null, null, xContentRegistry(), null,
|
||||
new NullDispatcher());
|
||||
return new NetworkModule(settings, transportClient, Arrays.asList(plugins), threadPool, null, null, null, null,
|
||||
xContentRegistry(), null, new NullDispatcher());
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -42,7 +42,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
public class BigArraysTests extends ESTestCase {

private BigArrays randombigArrays() {
return new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
return new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
}

private BigArrays bigArrays;
@@ -41,7 +41,7 @@ public class BytesRefHashTests extends ESSingleNodeTestCase {
BytesRefHash hash;

private BigArrays randombigArrays() {
return new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
return new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
}

private void newHash() {
@@ -36,7 +36,7 @@ public class LongHashTests extends ESSingleNodeTestCase {
LongHash hash;

private BigArrays randombigArrays() {
return new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
return new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
}

private void newHash() {
@@ -27,7 +27,7 @@ import org.elasticsearch.test.ESSingleNodeTestCase;
public class LongObjectHashMapTests extends ESSingleNodeTestCase {

private BigArrays randombigArrays() {
return new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
return new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
}

public void testDuel() {
@ -50,7 +50,7 @@ public class EsThreadPoolExecutorTests extends ESSingleNodeTestCase {
|
|||
}
|
||||
|
||||
private void runThreadPoolExecutorTest(final int fill, final String executor) {
|
||||
final CountDownLatch latch = new CountDownLatch(fill);
|
||||
final CountDownLatch latch = new CountDownLatch(1);
|
||||
for (int i = 0; i < fill; i++) {
|
||||
node().injector().getInstance(ThreadPool.class).executor(executor).execute(() -> {
|
||||
try {
|
||||
|
@ -64,12 +64,12 @@ public class EsThreadPoolExecutorTests extends ESSingleNodeTestCase {
|
|||
final AtomicBoolean rejected = new AtomicBoolean();
|
||||
node().injector().getInstance(ThreadPool.class).executor(executor).execute(new AbstractRunnable() {
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
public void onFailure(final Exception e) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onRejection(Exception e) {
|
||||
public void onRejection(final Exception e) {
|
||||
rejected.set(true);
|
||||
assertThat(e, hasToString(containsString("name = es-thread-pool-executor-tests/" + executor + ", ")));
|
||||
}
|
||||
|
|
|
@ -39,6 +39,7 @@ import org.elasticsearch.common.settings.Setting;
|
|||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.PageCacheRecycler;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.env.ShardLock;
|
||||
|
@ -124,7 +125,8 @@ public class IndexModuleTests extends ESTestCase {
|
|||
emptyMap(), emptyMap(), emptyMap());
|
||||
threadPool = new TestThreadPool("test");
|
||||
circuitBreakerService = new NoneCircuitBreakerService();
|
||||
bigArrays = new BigArrays(settings, circuitBreakerService);
|
||||
PageCacheRecycler pageCacheRecycler = new PageCacheRecycler(settings);
|
||||
bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService);
|
||||
scriptService = new ScriptService(settings, Collections.emptyMap(), Collections.emptyMap());
|
||||
clusterService = ClusterServiceUtils.createClusterService(threadPool);
|
||||
nodeEnvironment = new NodeEnvironment(settings, environment);
|
||||
|
|
|
@ -21,6 +21,7 @@ package org.elasticsearch.index;
|
|||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.common.settings.AbstractScopedSettings;
|
||||
import org.elasticsearch.common.settings.IndexScopedSettings;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
|
@ -444,6 +445,55 @@ public class IndexSettingsTests extends ESTestCase {
|
|||
assertEquals(actual, settings.getGenerationThresholdSize());
|
||||
}
|
||||
|
||||
public void testPrivateSettingsValidation() {
|
||||
final Settings settings = Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, System.currentTimeMillis()).build();
|
||||
final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);
|
||||
|
||||
{
|
||||
// validation should fail since we are not ignoring private settings
|
||||
final IllegalArgumentException e = expectThrows(
|
||||
IllegalArgumentException.class,
|
||||
() -> indexScopedSettings.validate(settings, randomBoolean()));
|
||||
assertThat(e, hasToString(containsString("unknown setting [index.creation_date]")));
|
||||
}
|
||||
|
||||
{
|
||||
// validation should fail since we are not ignoring private settings
|
||||
final IllegalArgumentException e = expectThrows(
|
||||
IllegalArgumentException.class,
|
||||
() -> indexScopedSettings.validate(settings, randomBoolean(), false, randomBoolean()));
|
||||
assertThat(e, hasToString(containsString("unknown setting [index.creation_date]")));
|
||||
}
|
||||
|
||||
// nothing should happen since we are ignoring private settings
|
||||
indexScopedSettings.validate(settings, randomBoolean(), true, randomBoolean());
|
||||
}
|
||||
|
||||
public void testArchivedSettingsValidation() {
|
||||
final Settings settings =
|
||||
Settings.builder().put(AbstractScopedSettings.ARCHIVED_SETTINGS_PREFIX + "foo", System.currentTimeMillis()).build();
|
||||
final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);
|
||||
|
||||
{
|
||||
// validation should fail since we are not ignoring archived settings
|
||||
final IllegalArgumentException e = expectThrows(
|
||||
IllegalArgumentException.class,
|
||||
() -> indexScopedSettings.validate(settings, randomBoolean()));
|
||||
assertThat(e, hasToString(containsString("unknown setting [archived.foo]")));
|
||||
}
|
||||
|
||||
{
|
||||
// validation should fail since we are not ignoring archived settings
|
||||
final IllegalArgumentException e = expectThrows(
|
||||
IllegalArgumentException.class,
|
||||
() -> indexScopedSettings.validate(settings, randomBoolean(), randomBoolean(), false));
|
||||
assertThat(e, hasToString(containsString("unknown setting [archived.foo]")));
|
||||
}
|
||||
|
||||
// nothing should happen since we are ignoring archived settings
|
||||
indexScopedSettings.validate(settings, randomBoolean(), randomBoolean(), true);
|
||||
}
|
||||
|
||||
public void testArchiveBrokenIndexSettings() {
|
||||
Settings settings =
|
||||
IndexScopedSettings.DEFAULT_SCOPED_SETTINGS.archiveUnknownOrInvalidSettings(
|
||||
|
|
|
@ -2983,6 +2983,50 @@ public class InternalEngineTests extends EngineTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testDoubleDeliveryReplicaAppendingAndDeleteOnly() throws IOException {
|
||||
final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(),
|
||||
new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
|
||||
Engine.Index operation = appendOnlyReplica(doc, false, 1, randomIntBetween(0, 5));
|
||||
Engine.Index retry = appendOnlyReplica(doc, true, 1, randomIntBetween(0, 5));
|
||||
Engine.Delete delete = new Engine.Delete(operation.type(), operation.id(), operation.uid(),
|
||||
Math.max(retry.seqNo(), operation.seqNo())+1, operation.primaryTerm(), operation.version()+1, operation.versionType(),
|
||||
REPLICA, operation.startTime()+1);
|
||||
// operations with a seq# equal or lower to the local checkpoint are not indexed to lucene
|
||||
// and the version lookup is skipped
|
||||
final boolean belowLckp = operation.seqNo() == 0 && retry.seqNo() == 0;
|
||||
if (randomBoolean()) {
|
||||
Engine.IndexResult indexResult = engine.index(operation);
|
||||
assertFalse(engine.indexWriterHasDeletions());
|
||||
assertEquals(0, engine.getNumVersionLookups());
|
||||
assertNotNull(indexResult.getTranslogLocation());
|
||||
engine.delete(delete);
|
||||
assertEquals(1, engine.getNumVersionLookups());
|
||||
assertTrue(engine.indexWriterHasDeletions());
|
||||
Engine.IndexResult retryResult = engine.index(retry);
|
||||
assertEquals(belowLckp ? 1 : 2, engine.getNumVersionLookups());
|
||||
assertNotNull(retryResult.getTranslogLocation());
|
||||
assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) > 0);
|
||||
} else {
|
||||
Engine.IndexResult retryResult = engine.index(retry);
|
||||
assertFalse(engine.indexWriterHasDeletions());
|
||||
assertEquals(1, engine.getNumVersionLookups());
|
||||
assertNotNull(retryResult.getTranslogLocation());
|
||||
engine.delete(delete);
|
||||
assertTrue(engine.indexWriterHasDeletions());
|
||||
assertEquals(2, engine.getNumVersionLookups());
|
||||
Engine.IndexResult indexResult = engine.index(operation);
|
||||
assertEquals(belowLckp ? 2 : 3, engine.getNumVersionLookups());
|
||||
assertNotNull(retryResult.getTranslogLocation());
|
||||
assertTrue(retryResult.getTranslogLocation().compareTo(indexResult.getTranslogLocation()) < 0);
|
||||
}
|
||||
|
||||
engine.refresh("test");
|
||||
try (Engine.Searcher searcher = engine.acquireSearcher("test")) {
|
||||
TopDocs topDocs = searcher.searcher().search(new MatchAllDocsQuery(), 10);
|
||||
assertEquals(0, topDocs.totalHits);
|
||||
}
|
||||
}
|
||||
|
||||
public void testDoubleDeliveryReplicaAppendingOnly() throws IOException {
|
||||
final ParsedDocument doc = testParsedDocument("1", null, testDocumentWithTextField(),
|
||||
new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
|
||||
|
|
|
@ -1377,6 +1377,23 @@ public class DocumentParserTests extends ESSingleNodeTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testDynamicFieldsEmptyName() throws Exception {
|
||||
BytesReference bytes = XContentFactory.jsonBuilder()
|
||||
.startObject().startArray("top.")
|
||||
.startObject()
|
||||
.startObject("aoeu")
|
||||
.field("a", 1).field(" ", 2)
|
||||
.endObject()
|
||||
.endObject().endArray()
|
||||
.endObject().bytes();
|
||||
|
||||
IllegalArgumentException emptyFieldNameException = expectThrows(IllegalArgumentException.class,
|
||||
() -> client().prepareIndex("idx", "type").setSource(bytes, XContentType.JSON).get());
|
||||
|
||||
assertThat(emptyFieldNameException.getMessage(), containsString(
|
||||
"object field cannot contain only whitespace: ['top.aoeu. ']"));
|
||||
}
|
||||
|
||||
public void testBlankFieldNames() throws Exception {
|
||||
final BytesReference bytes = XContentFactory.jsonBuilder()
|
||||
.startObject()
|
||||
|
|
|
@ -18,11 +18,13 @@
|
|||
*/
|
||||
package org.elasticsearch.index.mapper;
|
||||
|
||||
import org.elasticsearch.common.geo.SpatialStrategy;
|
||||
import org.elasticsearch.common.geo.builders.ShapeBuilder;
|
||||
import org.elasticsearch.index.mapper.GeoShapeFieldMapper;
|
||||
import org.elasticsearch.index.mapper.MappedFieldType;
|
||||
import org.elasticsearch.index.mapper.GeoShapeFieldMapper.GeoShapeFieldType;
|
||||
import org.junit.Before;
|
||||
|
||||
import java.io.IOException;
|
||||
|
||||
public class GeoShapeFieldTypeTests extends FieldTypeTestCase {
|
||||
@Override
|
||||
protected MappedFieldType createDefaultFieldType() {
|
||||
|
@ -68,4 +70,17 @@ public class GeoShapeFieldTypeTests extends FieldTypeTestCase {
}
});
}

/**
* Test for {@link GeoShapeFieldType#setStrategyName(String)} that checks that {@link GeoShapeFieldType#pointsOnly()}
* gets set as a side effect when using SpatialStrategy.TERM
*/
public void testSetStrategyName() throws IOException {
GeoShapeFieldType fieldType = new GeoShapeFieldMapper.GeoShapeFieldType();
assertFalse(fieldType.pointsOnly());
fieldType.setStrategyName(SpatialStrategy.RECURSIVE.getStrategyName());
assertFalse(fieldType.pointsOnly());
fieldType.setStrategyName(SpatialStrategy.TERM.getStrategyName());
assertTrue(fieldType.pointsOnly());
}
}

@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.index.query;
|
||||
|
||||
import com.vividsolutions.jts.geom.Coordinate;
|
||||
|
||||
import org.apache.lucene.search.BooleanQuery;
|
||||
import org.apache.lucene.search.ConstantScoreQuery;
|
||||
import org.apache.lucene.search.MatchNoDocsQuery;
|
||||
|
@ -258,6 +259,15 @@ public class GeoShapeQueryBuilderTests extends AbstractQueryTestCase<GeoShapeQue
|
|||
assertThat(e.getMessage(), containsString("failed to find geo_shape field [unmapped]"));
|
||||
}
|
||||
|
||||
public void testWrongFieldType() throws IOException {
|
||||
assumeTrue("test runs only when at least a type is registered", getCurrentTypes().length > 0);
|
||||
ShapeType shapeType = ShapeType.randomType(random());
|
||||
ShapeBuilder shape = RandomShapeGenerator.createShapeWithin(random(), null, shapeType);
|
||||
final GeoShapeQueryBuilder queryBuilder = new GeoShapeQueryBuilder(STRING_FIELD_NAME, shape);
|
||||
QueryShardException e = expectThrows(QueryShardException.class, () -> queryBuilder.toQuery(createShardContext()));
|
||||
assertThat(e.getMessage(), containsString("Field [mapped_string] is not of type [geo_shape] but of type [text]"));
|
||||
}
|
||||
|
||||
public void testSerializationFailsUnlessFetched() throws IOException {
|
||||
QueryBuilder builder = doCreateTestQueryBuilder(true);
|
||||
QueryBuilder queryBuilder = Rewriteable.rewrite(builder, createShardContext());
|
||||
|
|
|
@ -1,69 +0,0 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.index.shard;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.util.ArrayList;
|
||||
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.nullValue;
|
||||
|
||||
public class CommitPointsTests extends ESTestCase {
|
||||
private final Logger logger = Loggers.getLogger(CommitPointsTests.class);
|
||||
|
||||
public void testCommitPointXContent() throws Exception {
|
||||
ArrayList<CommitPoint.FileInfo> indexFiles = new ArrayList<>();
|
||||
indexFiles.add(new CommitPoint.FileInfo("file1", "file1_p", 100, "ck1"));
|
||||
indexFiles.add(new CommitPoint.FileInfo("file2", "file2_p", 200, "ck2"));
|
||||
|
||||
ArrayList<CommitPoint.FileInfo> translogFiles = new ArrayList<>();
|
||||
translogFiles.add(new CommitPoint.FileInfo("t_file1", "t_file1_p", 100, null));
|
||||
translogFiles.add(new CommitPoint.FileInfo("t_file2", "t_file2_p", 200, null));
|
||||
|
||||
CommitPoint commitPoint = new CommitPoint(1, "test", CommitPoint.Type.GENERATED, indexFiles, translogFiles);
|
||||
|
||||
byte[] serialized = CommitPoints.toXContent(commitPoint);
|
||||
logger.info("serialized commit_point {}", new String(serialized, StandardCharsets.UTF_8));
|
||||
|
||||
CommitPoint desCp = CommitPoints.fromXContent(serialized);
|
||||
assertThat(desCp.version(), equalTo(commitPoint.version()));
|
||||
assertThat(desCp.name(), equalTo(commitPoint.name()));
|
||||
|
||||
assertThat(desCp.indexFiles().size(), equalTo(commitPoint.indexFiles().size()));
|
||||
for (int i = 0; i < desCp.indexFiles().size(); i++) {
|
||||
assertThat(desCp.indexFiles().get(i).name(), equalTo(commitPoint.indexFiles().get(i).name()));
|
||||
assertThat(desCp.indexFiles().get(i).physicalName(), equalTo(commitPoint.indexFiles().get(i).physicalName()));
|
||||
assertThat(desCp.indexFiles().get(i).length(), equalTo(commitPoint.indexFiles().get(i).length()));
|
||||
assertThat(desCp.indexFiles().get(i).checksum(), equalTo(commitPoint.indexFiles().get(i).checksum()));
|
||||
}
|
||||
|
||||
assertThat(desCp.translogFiles().size(), equalTo(commitPoint.translogFiles().size()));
|
||||
for (int i = 0; i < desCp.indexFiles().size(); i++) {
|
||||
assertThat(desCp.translogFiles().get(i).name(), equalTo(commitPoint.translogFiles().get(i).name()));
|
||||
assertThat(desCp.translogFiles().get(i).physicalName(), equalTo(commitPoint.translogFiles().get(i).physicalName()));
|
||||
assertThat(desCp.translogFiles().get(i).length(), equalTo(commitPoint.translogFiles().get(i).length()));
|
||||
assertThat(desCp.translogFiles().get(i).checksum(), nullValue());
|
||||
}
|
||||
}
|
||||
}
|
|
@ -2854,6 +2854,10 @@ public class IndexShardTests extends IndexShardTestCase {
t.join();
}

// We need to wait for all ongoing merges to complete. The reason is that during a merge the
// IndexWriter holds the core cache key open and causes the memory to be registered in the breaker
primary.forceMerge(new ForceMergeRequest().maxNumSegments(1).flush(true));

// Close remaining searchers
IOUtils.close(searchers);

@ -240,7 +240,7 @@ public class IndicesServiceTests extends ESSingleNodeTestCase {
assertEquals(indicesService.numPendingDeletes(test.index()), 0);
assertTrue(indicesService.hasUncompletedPendingDeletes()); // "bogus" index has not been removed
}
assertAcked(client().admin().indices().prepareOpen("test"));
assertAcked(client().admin().indices().prepareOpen("test").setTimeout(TimeValue.timeValueSeconds(1)));

}

@ -32,7 +32,6 @@ import java.util.Set;
|
|||
import java.util.concurrent.ExecutionException;
|
||||
|
||||
import static org.elasticsearch.common.util.set.Sets.newHashSet;
|
||||
import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
|
||||
import static org.elasticsearch.index.query.QueryBuilders.queryStringQuery;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
@ -40,7 +39,6 @@ import static org.hamcrest.Matchers.nullValue;
|
|||
|
||||
public class AliasResolveRoutingIT extends ESIntegTestCase {
|
||||
|
||||
|
||||
// see https://github.com/elastic/elasticsearch/issues/13278
|
||||
public void testSearchClosedWildcardIndex() throws ExecutionException, InterruptedException {
|
||||
createIndex("test-0");
|
||||
|
@ -52,10 +50,17 @@ public class AliasResolveRoutingIT extends ESIntegTestCase {
|
|||
client().prepareIndex("test-0", "type1", "2").setSource("field1", "quick brown"),
|
||||
client().prepareIndex("test-0", "type1", "3").setSource("field1", "quick"));
|
||||
refresh("test-*");
|
||||
assertHitCount(client().prepareSearch().setIndices("alias-*").setIndicesOptions(IndicesOptions.lenientExpandOpen()).setQuery(queryStringQuery("quick")).get(), 3L);
|
||||
assertHitCount(
|
||||
client()
|
||||
.prepareSearch()
|
||||
.setIndices("alias-*")
|
||||
.setIndicesOptions(IndicesOptions.lenientExpandOpen())
|
||||
.setQuery(queryStringQuery("quick"))
|
||||
.get(),
|
||||
3L);
|
||||
}
|
||||
|
||||
public void testResolveIndexRouting() throws Exception {
|
||||
public void testResolveIndexRouting() {
|
||||
createIndex("test1");
|
||||
createIndex("test2");
|
||||
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
|
||||
|
@ -97,9 +102,10 @@ public class AliasResolveRoutingIT extends ESIntegTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testResolveSearchRouting() throws Exception {
|
||||
public void testResolveSearchRouting() {
|
||||
createIndex("test1");
|
||||
createIndex("test2");
|
||||
createIndex("test3");
|
||||
client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
|
||||
|
||||
client().admin().indices().prepareAliases()
|
||||
|
@ -108,7 +114,10 @@ public class AliasResolveRoutingIT extends ESIntegTestCase {
|
|||
.addAliasAction(AliasActions.add().index("test2").alias("alias20").routing("0"))
|
||||
.addAliasAction(AliasActions.add().index("test2").alias("alias21").routing("1"))
|
||||
.addAliasAction(AliasActions.add().index("test1").alias("alias0").routing("0"))
|
||||
.addAliasAction(AliasActions.add().index("test2").alias("alias0").routing("0")).get();
|
||||
.addAliasAction(AliasActions.add().index("test2").alias("alias0").routing("0"))
|
||||
.addAliasAction(AliasActions.add().index("test3").alias("alias3tw").routing("tw "))
|
||||
.addAliasAction(AliasActions.add().index("test3").alias("alias3ltw").routing(" ltw "))
|
||||
.addAliasAction(AliasActions.add().index("test3").alias("alias3lw").routing(" lw")).get();
|
||||
|
||||
ClusterState state = clusterService().state();
|
||||
IndexNameExpressionResolver indexNameExpressionResolver = internalCluster().getInstance(IndexNameExpressionResolver.class);
|
||||
|
@ -118,7 +127,9 @@ public class AliasResolveRoutingIT extends ESIntegTestCase {
|
|||
assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, "alias10"), equalTo(newMap("test1", newSet("0"))));
|
||||
assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0", "alias10"), equalTo(newMap("test1", newSet("0"))));
|
||||
assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "1", "alias10"), nullValue());
|
||||
assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, "alias0"), equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
|
||||
assertThat(
|
||||
indexNameExpressionResolver.resolveSearchRouting(state, null, "alias0"),
|
||||
equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
|
||||
|
||||
assertThat(indexNameExpressionResolver.resolveSearchRouting(state, null, new String[]{"alias10", "alias20"}),
|
||||
equalTo(newMap("test1", newSet("0"), "test2", newSet("0"))));
|
||||
|
@ -143,13 +154,42 @@ public class AliasResolveRoutingIT extends ESIntegTestCase {
|
|||
equalTo(newMap("test1", newSet("0"), "test2", newSet("1"))));
|
||||
assertThat(indexNameExpressionResolver.resolveSearchRouting(state, "0,1,2", new String[]{"test1", "alias10", "alias21"}),
|
||||
equalTo(newMap("test1", newSet("0", "1", "2"), "test2", newSet("1"))));
|
||||
|
||||
assertThat(
|
||||
indexNameExpressionResolver.resolveSearchRouting(state, "tw , ltw , lw", "test1"),
|
||||
equalTo(newMap("test1", newSet("tw ", " ltw ", " lw"))));
|
||||
assertThat(
|
||||
indexNameExpressionResolver.resolveSearchRouting(state, "tw , ltw , lw", "alias3tw"),
|
||||
equalTo(newMap("test3", newSet("tw "))));
|
||||
assertThat(
|
||||
indexNameExpressionResolver.resolveSearchRouting(state, "tw , ltw , lw", "alias3ltw"),
|
||||
equalTo(newMap("test3", newSet(" ltw "))));
|
||||
assertThat(
|
||||
indexNameExpressionResolver.resolveSearchRouting(state, "tw , ltw , lw", "alias3lw"),
|
||||
equalTo(newMap("test3", newSet(" lw"))));
|
||||
assertThat(
|
||||
indexNameExpressionResolver.resolveSearchRouting(state, "0,tw , ltw , lw", "test1", "alias3ltw"),
|
||||
equalTo(newMap("test1", newSet("0", "tw ", " ltw ", " lw"), "test3", newSet(" ltw "))));
|
||||
|
||||
assertThat(
|
||||
indexNameExpressionResolver.resolveSearchRouting(state, "0,1,2,tw , ltw , lw", (String[])null),
|
||||
equalTo(newMap(
|
||||
"test1", newSet("0", "1", "2", "tw ", " ltw ", " lw"),
|
||||
"test2", newSet("0", "1", "2", "tw ", " ltw ", " lw"),
|
||||
"test3", newSet("0", "1", "2", "tw ", " ltw ", " lw"))));
|
||||
|
||||
assertThat(
|
||||
indexNameExpressionResolver.resolveSearchRoutingAllIndices(state.metaData(), "0,1,2,tw , ltw , lw"),
|
||||
equalTo(newMap(
|
||||
"test1", newSet("0", "1", "2", "tw ", " ltw ", " lw"),
|
||||
"test2", newSet("0", "1", "2", "tw ", " ltw ", " lw"),
|
||||
"test3", newSet("0", "1", "2", "tw ", " ltw ", " lw"))));
|
||||
}
|
||||
|
||||
private <T> Set<T> newSet(T... elements) {
|
||||
return newHashSet(elements);
|
||||
}
|
||||
|
||||
|
||||
private <K, V> Map<K, V> newMap(K key, V value) {
|
||||
Map<K, V> r = new HashMap<>();
|
||||
r.put(key, value);
|
||||
|
@ -163,4 +203,12 @@ public class AliasResolveRoutingIT extends ESIntegTestCase {
|
|||
return r;
|
||||
}
|
||||
|
||||
private <K, V> Map<K, V> newMap(K key1, V value1, K key2, V value2, K key3, V value3) {
|
||||
Map<K, V> r = new HashMap<>();
|
||||
r.put(key1, value1);
|
||||
r.put(key2, value2);
|
||||
r.put(key3, value3);
|
||||
return r;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -32,6 +32,7 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.MockBigArrays;
|
||||
import org.elasticsearch.common.util.MockPageCacheRecycler;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.cache.IndexCache;
|
||||
|
@ -104,7 +105,7 @@ public class DefaultSearchContextTests extends ESTestCase {
|
|||
when(indexService.getIndexSettings()).thenReturn(indexSettings);
|
||||
when(mapperService.getIndexSettings()).thenReturn(indexSettings);
|
||||
|
||||
BigArrays bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
|
||||
BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
|
||||
|
||||
try (Directory dir = newDirectory();
|
||||
RandomIndexWriter w = new RandomIndexWriter(random(), dir);
|
||||
|
|
|
@ -25,6 +25,7 @@ import org.apache.lucene.search.Scorer;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.MockBigArrays;
|
||||
import org.elasticsearch.common.util.MockPageCacheRecycler;
|
||||
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
@ -46,7 +47,7 @@ public class MultiBucketAggregatorWrapperTests extends ESTestCase {
|
|||
public void testNoNullScorerIsDelegated() throws Exception {
|
||||
LeafReaderContext leafReaderContext = MemoryIndex.fromDocument(Collections.emptyList(), new MockAnalyzer(random()))
|
||||
.createSearcher().getIndexReader().leaves().get(0);
|
||||
BigArrays bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
|
||||
BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
|
||||
SearchContext searchContext = mock(SearchContext.class);
|
||||
when(searchContext.bigArrays()).thenReturn(bigArrays);
|
||||
|
||||
|
|
|
@ -33,6 +33,7 @@ import org.apache.lucene.search.TopDocs;
|
|||
import org.apache.lucene.store.Directory;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.MockBigArrays;
|
||||
import org.elasticsearch.common.util.MockPageCacheRecycler;
|
||||
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
|
||||
import org.elasticsearch.search.aggregations.AggregatorTestCase;
|
||||
import org.elasticsearch.search.aggregations.BucketCollector;
|
||||
|
@ -63,8 +64,8 @@ public class BestDocsDeferringCollectorTests extends AggregatorTestCase {
|
|||
TermQuery termQuery = new TermQuery(new Term("field", String.valueOf(randomInt(maxNumValues))));
|
||||
TopDocs topDocs = indexSearcher.search(termQuery, numDocs);
|
||||
|
||||
BestDocsDeferringCollector collector =
|
||||
new BestDocsDeferringCollector(numDocs, new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService()));
|
||||
BestDocsDeferringCollector collector = new BestDocsDeferringCollector(numDocs,
|
||||
new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()));
|
||||
Set<Integer> deferredCollectedDocIds = new HashSet<>();
|
||||
collector.setDeferredCollector(Collections.singleton(testCollector(deferredCollectedDocIds)));
|
||||
collector.preCollection();
|
||||
|
|
|
@ -38,6 +38,7 @@ import org.apache.lucene.util.NumericUtils;
|
|||
import org.elasticsearch.common.network.InetAddresses;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.MockBigArrays;
|
||||
import org.elasticsearch.common.util.MockPageCacheRecycler;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.mapper.IpFieldMapper;
|
||||
import org.elasticsearch.index.mapper.KeywordFieldMapper;
|
||||
|
@ -912,7 +913,7 @@ public class TermsAggregatorTests extends AggregatorTestCase {
|
|||
dir.close();
|
||||
}
|
||||
InternalAggregation.ReduceContext ctx =
|
||||
new InternalAggregation.ReduceContext(new MockBigArrays(Settings.EMPTY,
|
||||
new InternalAggregation.ReduceContext(new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY),
|
||||
new NoneCircuitBreakerService()), null, true);
|
||||
for (InternalAggregation internalAgg : aggs) {
|
||||
InternalAggregation mergedAggs = internalAgg.doReduce(aggs, ctx);
|
||||
|
|
|
@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.Writeable.Reader;
|
|||
import org.elasticsearch.common.lease.Releasables;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.MockBigArrays;
|
||||
import org.elasticsearch.common.util.MockPageCacheRecycler;
|
||||
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
|
||||
import org.elasticsearch.search.aggregations.ParsedAggregation;
|
||||
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
|
||||
|
@ -60,7 +61,7 @@ public class InternalCardinalityTests extends InternalAggregationTestCase<Intern
|
|||
protected InternalCardinality createTestInstance(String name,
|
||||
List<PipelineAggregator> pipelineAggregators, Map<String, Object> metaData) {
|
||||
HyperLogLogPlusPlus hllpp = new HyperLogLogPlusPlus(p,
|
||||
new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService()), 1);
|
||||
new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()), 1);
|
||||
algos.add(hllpp);
|
||||
for (int i = 0; i < 100; i++) {
|
||||
hllpp.collect(0, BitMixer.mix64(randomIntBetween(1, 100)));
|
||||
|
@ -107,7 +108,7 @@ public class InternalCardinalityTests extends InternalAggregationTestCase<Intern
|
|||
break;
|
||||
case 1:
|
||||
HyperLogLogPlusPlus newState = new HyperLogLogPlusPlus(state.precision(),
|
||||
new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService()), 0);
|
||||
new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService()), 0);
|
||||
newState.merge(0, state, 0);
|
||||
int extraValues = between(10, 100);
|
||||
for (int i = 0; i < extraValues; i++) {
|
||||
|
|
|
@ -83,16 +83,6 @@ public class DirectCandidateGeneratorTests extends ESTestCase {
|
|||
expectThrows(NullPointerException.class, () -> DirectCandidateGeneratorBuilder.resolveDistance(null));
|
||||
}
|
||||
|
||||
public void testLevensteinDeprecation() {
|
||||
assertThat(DirectCandidateGeneratorBuilder.resolveDistance("levenstein"), instanceOf(LevensteinDistance.class));
|
||||
assertWarnings("Deprecated distance [levenstein] used, replaced by [levenshtein]");
|
||||
}
|
||||
|
||||
public void testJaroWinklerDeprecation() {
|
||||
assertThat(DirectCandidateGeneratorBuilder.resolveDistance("jaroWinkler"), instanceOf(JaroWinklerDistance.class));
|
||||
assertWarnings("Deprecated distance [jarowinkler] used, replaced by [jaro_winkler]");
|
||||
}
|
||||
|
||||
private static DirectCandidateGeneratorBuilder mutate(DirectCandidateGeneratorBuilder original) throws IOException {
|
||||
DirectCandidateGeneratorBuilder mutation = copy(original);
|
||||
List<Supplier<DirectCandidateGeneratorBuilder>> mutators = new ArrayList<>();
|
||||
|
|
|
@ -58,16 +58,6 @@ public class StringDistanceImplTests extends AbstractWriteableEnumTestCase {
|
|||
assertThat(e.getMessage(), equalTo("Input string is null"));
|
||||
}
|
||||
|
||||
public void testLevensteinDeprecation() {
|
||||
assertThat(StringDistanceImpl.resolve("levenstein"), equalTo(StringDistanceImpl.LEVENSHTEIN));
|
||||
assertWarnings("Deprecated distance [levenstein] used, replaced by [levenshtein]");
|
||||
}
|
||||
|
||||
public void testJaroWinklerDeprecation() {
|
||||
assertThat(StringDistanceImpl.resolve("jaroWinkler"), equalTo(StringDistanceImpl.JARO_WINKLER));
|
||||
assertWarnings("Deprecated distance [jarowinkler] used, replaced by [jaro_winkler]");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void testWriteTo() throws IOException {
|
||||
assertWriteToStream(StringDistanceImpl.INTERNAL, 0);
|
||||
|
@ -85,5 +75,4 @@ public class StringDistanceImplTests extends AbstractWriteableEnumTestCase {
|
|||
assertReadFromStream(3, StringDistanceImpl.JARO_WINKLER);
|
||||
assertReadFromStream(4, StringDistanceImpl.NGRAM);
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@ -30,6 +30,7 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.PageCacheRecycler;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.VersionUtils;
|
||||
import org.elasticsearch.threadpool.TestThreadPool;
|
||||
|
@ -177,7 +178,7 @@ public class TcpTransportTests extends ESTestCase {
|
|||
try {
|
||||
TcpTransport transport = new TcpTransport(
|
||||
"test", Settings.builder().put("transport.tcp.compress", compressed).build(), threadPool,
|
||||
new BigArrays(Settings.EMPTY, null), null, null, null) {
|
||||
new BigArrays(new PageCacheRecycler(Settings.EMPTY), null), null, null, null) {
|
||||
|
||||
@Override
|
||||
protected FakeChannel bind(String name, InetSocketAddress address) throws IOException {
|
||||
|
@ -190,11 +191,6 @@ public class TcpTransportTests extends ESTestCase {
|
|||
return new FakeChannel(messageCaptor);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getNumOpenServerConnections() {
|
||||
return 0;
|
||||
}
|
||||
|
||||
@Override
|
||||
public NodeChannels getConnection(DiscoveryNode node) {
|
||||
int numConnections = MockTcpTransport.LIGHT_PROFILE.getNumConnections();
|
||||
|
|
|
@ -80,7 +80,7 @@ if [ -z "$ES_TMPDIR" ]; then
mktemp_coreutils=$?
set -e
if [ $mktemp_coreutils -eq 0 ]; then
ES_TMPDIR=`mktemp -d --tmpdir "elasticearch.XXXXXXXX"`
ES_TMPDIR=`mktemp -d --tmpdir "elasticsearch.XXXXXXXX"`
else
ES_TMPDIR=`mktemp -d -t elasticsearch`
fi

@ -35,8 +35,17 @@ The Search API returns `400 - Bad request` while it would previously return
* number of filters in the adjacency matrix aggregation is too large


==== Scroll queries cannot use the request_cache anymore
==== Scroll queries cannot use the `request_cache` anymore

Setting `request_cache:true` on a query that creates a scroll ('scroll=1m`)
Setting `request_cache:true` on a query that creates a scroll (`scroll=1m`)
has been deprecated in 6 and will now return a `400 - Bad request`.
Scroll queries are not meant to be cached.

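For context, a minimal hypothetical sketch of the now-rejected combination, written against the Java test-client style used elsewhere in this diff; the index name and query are illustrative and not taken from the change:

// Hypothetical sketch: a search that opens a scroll and also asks for the request cache.
// After this change it is rejected with `400 - Bad request` instead of only emitting a deprecation warning.
client().prepareSearch("my-index")                    // illustrative index name
        .setScroll(TimeValue.timeValueMinutes(1))     // creates a scroll context
        .setRequestCache(true)                        // no longer allowed together with scroll
        .setQuery(QueryBuilders.matchAllQuery())
        .get();                                       // expected to fail with 400 - Bad request
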
==== Term Suggesters supported distance algorithms

The following string distance algorithms were given additional names in 6.2 and
their existing names were deprecated. The deprecated names have now been
removed.

* `levenstein` - replaced by `levenshtein`
* `jarowinkler` - replaced by `jaro_winkler`

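As a rough, hypothetical illustration of the rename (not part of the upstream note), only the canonical names are expected to resolve now; `StringDistanceImpl.resolve` is the same helper exercised by the term suggester tests touched in this diff:

// Hypothetical sketch, assuming the canonical names resolve as in the remaining tests.
assertThat(StringDistanceImpl.resolve("levenshtein"), equalTo(StringDistanceImpl.LEVENSHTEIN));
assertThat(StringDistanceImpl.resolve("jaro_winkler"), equalTo(StringDistanceImpl.JARO_WINKLER));
// "levenstein" and "jaroWinkler" are assumed to be rejected as unknown values after this change.
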
@ -22,6 +22,7 @@ import org.elasticsearch.common.ParseField;
|
|||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.MockBigArrays;
|
||||
import org.elasticsearch.common.util.MockPageCacheRecycler;
|
||||
import org.elasticsearch.common.xcontent.ContextParser;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
|
||||
|
@ -160,7 +161,7 @@ public class InternalMatrixStatsTests extends InternalAggregationTestCase<Intern
|
|||
multiPassStats.computeStats(aValues, bValues);
|
||||
|
||||
ScriptService mockScriptService = mockScriptService();
|
||||
MockBigArrays bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
|
||||
MockBigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
|
||||
InternalAggregation.ReduceContext context =
|
||||
new InternalAggregation.ReduceContext(bigArrays, mockScriptService, true);
|
||||
InternalMatrixStats reduced = (InternalMatrixStats) shardResults.get(0).reduce(shardResults, context);
|
||||
|
|
|
@ -366,7 +366,7 @@ final class QueryAnalyzer {
// positives for percolator queries with range queries than term based queries.
// The is because the way number fields are extracted from the document to be percolated.
// Per field a single range is extracted and if a percolator query has two or more range queries
// on the same field than the the minimum should match can be higher than clauses in the CoveringQuery.
// on the same field, then the minimum should match can be higher than clauses in the CoveringQuery.
// Therefore right now the minimum should match is incremented once per number field when processing
// the percolator query at index time.
if (seenRangeFields.add(t[0].range.fieldName)) {

@ -40,6 +40,7 @@ import io.netty.handler.codec.http.HttpObjectAggregator;
|
|||
import io.netty.handler.codec.http.HttpRequestDecoder;
|
||||
import io.netty.handler.codec.http.HttpResponseEncoder;
|
||||
import io.netty.handler.timeout.ReadTimeoutException;
|
||||
import io.netty.handler.timeout.ReadTimeoutHandler;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.logging.log4j.util.Supplier;
|
||||
import org.elasticsearch.common.Strings;
|
||||
|
@ -86,7 +87,6 @@ import java.util.concurrent.TimeUnit;
|
|||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.regex.Pattern;
|
||||
|
||||
import static org.elasticsearch.common.settings.Setting.boolSetting;
|
||||
import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
|
||||
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS;
|
||||
import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_HEADERS;
|
||||
|
@ -105,6 +105,7 @@ import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_INIT
|
|||
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PORT;
|
||||
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_HOST;
|
||||
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_PUBLISH_PORT;
|
||||
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT;
|
||||
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_RESET_COOKIES;
|
||||
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_KEEP_ALIVE;
|
||||
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_TCP_NO_DELAY;
|
||||
|
@ -172,6 +173,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
|
|||
protected final ByteSizeValue tcpSendBufferSize;
|
||||
protected final ByteSizeValue tcpReceiveBufferSize;
|
||||
protected final RecvByteBufAllocator recvByteBufAllocator;
|
||||
private final int readTimeoutMillis;
|
||||
|
||||
protected final int maxCompositeBufferComponents;
|
||||
private final Dispatcher dispatcher;
|
||||
|
@ -220,6 +222,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
|
|||
this.tcpSendBufferSize = SETTING_HTTP_TCP_SEND_BUFFER_SIZE.get(settings);
|
||||
this.tcpReceiveBufferSize = SETTING_HTTP_TCP_RECEIVE_BUFFER_SIZE.get(settings);
|
||||
this.detailedErrorsEnabled = SETTING_HTTP_DETAILED_ERRORS_ENABLED.get(settings);
|
||||
this.readTimeoutMillis = Math.toIntExact(SETTING_HTTP_READ_TIMEOUT.get(settings).getMillis());
|
||||
|
||||
ByteSizeValue receivePredictor = SETTING_HTTP_NETTY_RECEIVE_PREDICTOR_SIZE.get(settings);
|
||||
recvByteBufAllocator = new FixedRecvByteBufAllocator(receivePredictor.bytesAsInt());
|
||||
|
@ -480,7 +483,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
|
|||
protected void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
|
||||
if (cause instanceof ReadTimeoutException) {
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace("Connection timeout [{}]", ctx.channel().remoteAddress());
|
||||
logger.trace("Read timeout [{}]", ctx.channel().remoteAddress());
|
||||
}
|
||||
ctx.channel().close();
|
||||
} else {
|
||||
|
@ -524,6 +527,7 @@ public class Netty4HttpServerTransport extends AbstractLifecycleComponent implem
|
|||
@Override
|
||||
protected void initChannel(Channel ch) throws Exception {
|
||||
ch.pipeline().addLast("openChannels", transport.serverOpenChannels);
|
||||
ch.pipeline().addLast("read_timeout", new ReadTimeoutHandler(transport.readTimeoutMillis, TimeUnit.MILLISECONDS));
|
||||
final HttpRequestDecoder decoder = new HttpRequestDecoder(
|
||||
Math.toIntExact(transport.maxInitialLineLength.getBytes()),
|
||||
Math.toIntExact(transport.maxHeaderSize.getBytes()),
|
||||
|
|
|
@ -25,6 +25,7 @@ import org.elasticsearch.common.network.NetworkService;
|
|||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.PageCacheRecycler;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.http.HttpServerTransport;
|
||||
import org.elasticsearch.http.netty4.Netty4HttpServerTransport;
|
||||
|
@ -76,6 +77,7 @@ public class Netty4Plugin extends Plugin implements NetworkPlugin {
|
|||
|
||||
@Override
|
||||
public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
|
||||
PageCacheRecycler pageCacheRecycler,
|
||||
CircuitBreakerService circuitBreakerService,
|
||||
NamedWriteableRegistry namedWriteableRegistry,
|
||||
NetworkService networkService) {
|
||||
|
|
|
@ -104,8 +104,6 @@ public class Netty4Transport extends TcpTransport {
|
|||
protected final int workerCount;
|
||||
protected final ByteSizeValue receivePredictorMin;
|
||||
protected final ByteSizeValue receivePredictorMax;
|
||||
// package private for testing
|
||||
volatile Netty4OpenChannelsHandler serverOpenChannels;
|
||||
protected volatile Bootstrap bootstrap;
|
||||
protected final Map<String, ServerBootstrap> serverBootstraps = newConcurrentMap();
|
||||
|
||||
|
@ -132,8 +130,6 @@ public class Netty4Transport extends TcpTransport {
|
|||
try {
|
||||
bootstrap = createBootstrap();
|
||||
if (NetworkService.NETWORK_SERVER.get(settings)) {
|
||||
final Netty4OpenChannelsHandler openChannels = new Netty4OpenChannelsHandler(logger);
|
||||
this.serverOpenChannels = openChannels;
|
||||
for (ProfileSettings profileSettings : profileSettings) {
|
||||
createServerBootstrap(profileSettings);
|
||||
bindServer(profileSettings);
|
||||
|
@ -242,12 +238,6 @@ public class Netty4Transport extends TcpTransport {
|
|||
onException(channel.attr(CHANNEL_KEY).get(), t instanceof Exception ? (Exception) t : new ElasticsearchException(t));
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getNumOpenServerConnections() {
|
||||
Netty4OpenChannelsHandler channels = serverOpenChannels;
|
||||
return channels == null ? 0 : channels.numberOfOpenChannels();
|
||||
}
|
||||
|
||||
@Override
|
||||
protected NettyTcpChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener<Void> listener)
|
||||
throws IOException {
|
||||
|
@ -294,7 +284,7 @@ public class Netty4Transport extends TcpTransport {
|
|||
@Override
|
||||
@SuppressForbidden(reason = "debug")
|
||||
protected void stopInternal() {
|
||||
Releasables.close(serverOpenChannels, () -> {
|
||||
Releasables.close(() -> {
|
||||
final List<Tuple<String, Future<?>>> serverBootstrapCloseFutures = new ArrayList<>(serverBootstraps.size());
|
||||
for (final Map.Entry<String, ServerBootstrap> entry : serverBootstraps.entrySet()) {
|
||||
serverBootstrapCloseFutures.add(
|
||||
|
@ -349,7 +339,6 @@ public class Netty4Transport extends TcpTransport {
|
|||
ch.attr(CHANNEL_KEY).set(nettyTcpChannel);
|
||||
serverAcceptedChannel(nettyTcpChannel);
|
||||
ch.pipeline().addLast("logging", new ESLoggingHandler());
|
||||
ch.pipeline().addLast("open_channels", Netty4Transport.this.serverOpenChannels);
|
||||
ch.pipeline().addLast("size", new Netty4SizeHeaderFrameDecoder());
|
||||
ch.pipeline().addLast("dispatcher", new Netty4MessageChannelHandler(Netty4Transport.this, name));
|
||||
}
|
||||
|
|
|
@ -96,7 +96,7 @@ public class NettyTcpChannel implements TcpChannel {
|
|||
}
|
||||
});
|
||||
channel.writeAndFlush(Netty4Utils.toByteBuf(reference), writePromise);
|
||||
|
||||
|
||||
if (channel.eventLoop().isShutdown()) {
|
||||
listener.onFailure(new TransportException("Cannot send message, event loop is shutting down."));
|
||||
}
|
||||
|
@ -105,4 +105,12 @@ public class NettyTcpChannel implements TcpChannel {
|
|||
public Channel getLowLevelChannel() {
|
||||
return channel;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "NettyTcpChannel{" +
|
||||
"localAddress=" + getLocalAddress() +
|
||||
", remoteAddress=" + channel.remoteAddress() +
|
||||
'}';
|
||||
}
|
||||
}
|
||||
|
|
|
@ -52,6 +52,7 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.ByteArray;
|
||||
import org.elasticsearch.common.util.MockBigArrays;
|
||||
import org.elasticsearch.common.util.MockPageCacheRecycler;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.json.JsonXContent;
|
||||
|
@ -99,7 +100,7 @@ public class Netty4HttpChannelTests extends ESTestCase {
|
|||
public void setup() throws Exception {
|
||||
networkService = new NetworkService(Collections.emptyList());
|
||||
threadPool = new TestThreadPool("test");
|
||||
bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
|
||||
bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
|
||||
}
|
||||
|
||||
@After
|
||||
|
|
|
@ -36,6 +36,7 @@ import org.elasticsearch.common.network.NetworkService;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.util.MockBigArrays;
|
||||
import org.elasticsearch.common.util.MockPageCacheRecycler;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
import org.elasticsearch.http.HttpServerTransport;
|
||||
import org.elasticsearch.http.NullDispatcher;
|
||||
|
@ -74,7 +75,7 @@ public class Netty4HttpServerPipeliningTests extends ESTestCase {
|
|||
public void setup() throws Exception {
|
||||
networkService = new NetworkService(Collections.emptyList());
|
||||
threadPool = new TestThreadPool("test");
|
||||
bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
|
||||
bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
|
||||
}
|
||||
|
||||
@After
|
||||
|
|
|
@ -19,8 +19,15 @@
|
|||
|
||||
package org.elasticsearch.http.netty4;
|
||||
|
||||
import io.netty.bootstrap.Bootstrap;
|
||||
import io.netty.buffer.ByteBufUtil;
|
||||
import io.netty.buffer.Unpooled;
|
||||
import io.netty.channel.ChannelFuture;
|
||||
import io.netty.channel.ChannelHandlerAdapter;
|
||||
import io.netty.channel.ChannelInitializer;
|
||||
import io.netty.channel.nio.NioEventLoopGroup;
|
||||
import io.netty.channel.socket.SocketChannel;
|
||||
import io.netty.channel.socket.nio.NioSocketChannel;
|
||||
import io.netty.handler.codec.TooLongFrameException;
|
||||
import io.netty.handler.codec.http.DefaultFullHttpRequest;
|
||||
import io.netty.handler.codec.http.FullHttpRequest;
|
||||
|
@ -39,7 +46,9 @@ import org.elasticsearch.common.settings.Setting;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.MockBigArrays;
|
||||
import org.elasticsearch.common.util.MockPageCacheRecycler;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
import org.elasticsearch.http.BindHttpException;
|
||||
import org.elasticsearch.http.HttpServerTransport;
|
||||
|
@ -63,6 +72,8 @@ import java.util.Arrays;
|
|||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
|
@ -93,7 +104,7 @@ public class Netty4HttpServerTransportTests extends ESTestCase {
|
|||
public void setup() throws Exception {
|
||||
networkService = new NetworkService(Collections.emptyList());
|
||||
threadPool = new TestThreadPool("test");
|
||||
bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
|
||||
bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
|
||||
}
|
||||
|
||||
@After
|
||||
|
@ -313,4 +324,53 @@ public class Netty4HttpServerTransportTests extends ESTestCase {
|
|||
assertNull(threadPool.getThreadContext().getTransient("bar_bad"));
|
||||
}
|
||||
}
|
||||
|
||||
public void testReadTimeout() throws Exception {
|
||||
final HttpServerTransport.Dispatcher dispatcher = new HttpServerTransport.Dispatcher() {
|
||||
|
||||
@Override
|
||||
public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
|
||||
throw new AssertionError("Should not have received a dispatched request");
|
||||
}
|
||||
|
||||
@Override
|
||||
public void dispatchBadRequest(final RestRequest request,
|
||||
final RestChannel channel,
|
||||
final ThreadContext threadContext,
|
||||
final Throwable cause) {
|
||||
throw new AssertionError("Should not have received a dispatched request");
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
Settings settings = Settings.builder()
|
||||
.put(HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT.getKey(), new TimeValue(randomIntBetween(100, 300)))
|
||||
.build();
|
||||
|
||||
|
||||
NioEventLoopGroup group = new NioEventLoopGroup();
|
||||
try (Netty4HttpServerTransport transport =
|
||||
new Netty4HttpServerTransport(settings, networkService, bigArrays, threadPool, xContentRegistry(), dispatcher)) {
|
||||
transport.start();
|
||||
final TransportAddress remoteAddress = randomFrom(transport.boundAddress.boundAddresses());
|
||||
|
||||
AtomicBoolean channelClosed = new AtomicBoolean(false);
|
||||
|
||||
Bootstrap clientBootstrap = new Bootstrap().channel(NioSocketChannel.class).handler(new ChannelInitializer<SocketChannel>() {
|
||||
|
||||
@Override
|
||||
protected void initChannel(SocketChannel ch) {
|
||||
ch.pipeline().addLast(new ChannelHandlerAdapter() {});
|
||||
|
||||
}
|
||||
}).group(group);
|
||||
ChannelFuture connect = clientBootstrap.connect(remoteAddress.address());
|
||||
connect.channel().closeFuture().addListener(future -> channelClosed.set(true));
|
||||
|
||||
assertBusy(() -> assertTrue("Channel should be closed due to read timeout", channelClosed.get()), 5, TimeUnit.SECONDS);
|
||||
|
||||
} finally {
|
||||
group.shutdownGracefully().await();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,6 +25,7 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.MockBigArrays;
|
||||
import org.elasticsearch.common.util.MockPageCacheRecycler;
|
||||
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
|
||||
import org.elasticsearch.mocksocket.MockSocket;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
@ -63,7 +64,7 @@ public class Netty4SizeHeaderFrameDecoderTests extends ESTestCase {
|
|||
public void startThreadPool() {
|
||||
threadPool = new ThreadPool(settings);
|
||||
NetworkService networkService = new NetworkService(Collections.emptyList());
|
||||
BigArrays bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
|
||||
BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
|
||||
nettyTransport = new Netty4Transport(settings, threadPool, networkService, bigArrays,
|
||||
new NamedWriteableRegistry(Collections.emptyList()), new NoneCircuitBreakerService());
|
||||
nettyTransport.start();
|
||||
|
|
|
@ -30,6 +30,7 @@ import org.elasticsearch.common.network.NetworkModule;
|
|||
import org.elasticsearch.common.network.NetworkService;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.PageCacheRecycler;
|
||||
import org.elasticsearch.indices.breaker.CircuitBreakerService;
|
||||
import org.elasticsearch.plugins.NetworkPlugin;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
|
@ -90,6 +91,7 @@ public class Netty4TransportIT extends ESNetty4IntegTestCase {
|
|||
|
||||
@Override
|
||||
public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
|
||||
PageCacheRecycler pageCacheRecycler,
|
||||
CircuitBreakerService circuitBreakerService,
|
||||
NamedWriteableRegistry namedWriteableRegistry,
|
||||
NetworkService networkService) {
|
||||
|
|
|
@ -25,6 +25,7 @@ import org.elasticsearch.common.network.NetworkUtils;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.MockBigArrays;
|
||||
import org.elasticsearch.common.util.MockPageCacheRecycler;
|
||||
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.threadpool.TestThreadPool;
|
||||
|
@ -116,7 +117,7 @@ public class NettyTransportMultiPortTests extends ESTestCase {
|
|||
}
|
||||
|
||||
private TcpTransport startTransport(Settings settings, ThreadPool threadPool) {
|
||||
BigArrays bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
|
||||
BigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
|
||||
TcpTransport transport = new Netty4Transport(settings, threadPool, new NetworkService(Collections.emptyList()),
|
||||
bigArrays, new NamedWriteableRegistry(Collections.emptyList()), new NoneCircuitBreakerService());
|
||||
transport.start();
|
||||
|
|
|
@ -615,7 +615,6 @@ public class FullClusterRestartIT extends ESRestTestCase {
* Tests recovery of an index with or without a translog and the
* statistics we gather about that.
*/
@AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/27649")
public void testRecovery() throws IOException {
int count;
boolean shouldHaveTranslog;

@ -70,8 +70,8 @@ public class MockBigArrays extends BigArrays {
private final PageCacheRecycler recycler;
private final CircuitBreakerService breakerService;

public MockBigArrays(Settings settings, CircuitBreakerService breakerService) {
this(new MockPageCacheRecycler(settings), breakerService, false);
public MockBigArrays(PageCacheRecycler recycler, CircuitBreakerService breakerService) {
this(recycler, breakerService, false);
}

private MockBigArrays(PageCacheRecycler recycler, CircuitBreakerService breakerService, boolean checkBreaker) {

@ -57,7 +57,7 @@ public class MockPageCacheRecycler extends PageCacheRecycler {

private final Random random;

MockPageCacheRecycler(Settings settings) {
public MockPageCacheRecycler(Settings settings) {
super(settings);
// we always initialize with 0 here since we really only wanna have some random bytes / ints / longs
// and given the fact that it's called concurrently it won't reproduces anyway the same order other than in a unittest

@ -67,7 +67,6 @@ import org.elasticsearch.index.store.DirectoryService;
|
|||
import org.elasticsearch.index.store.Store;
|
||||
import org.elasticsearch.indices.breaker.CircuitBreakerService;
|
||||
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
|
||||
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
|
||||
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
|
||||
import org.elasticsearch.indices.recovery.RecoveryFailedException;
|
||||
import org.elasticsearch.indices.recovery.RecoverySourceHandler;
|
||||
|
@ -309,7 +308,7 @@ public abstract class IndexShardTestCase extends ESTestCase {
|
|||
}
|
||||
|
||||
/**
|
||||
* Takes an existing shard, closes it and and starts a new initialing shard at the same location
|
||||
* Takes an existing shard, closes it and starts a new initialing shard at the same location
|
||||
*
|
||||
* @param listeners new listerns to use for the newly created shard
|
||||
*/
|
||||
|
@ -321,7 +320,7 @@ public abstract class IndexShardTestCase extends ESTestCase {
|
|||
}
|
||||
|
||||
/**
|
||||
* Takes an existing shard, closes it and and starts a new initialing shard at the same location
|
||||
* Takes an existing shard, closes it and starts a new initialing shard at the same location
|
||||
*
|
||||
* @param routing the shard routing to use for the newly created shard.
|
||||
* @param listeners new listerns to use for the newly created shard
|
||||
|
|
|
@ -30,6 +30,8 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.transport.BoundTransportAddress;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.common.util.MockBigArrays;
|
||||
import org.elasticsearch.common.util.MockPageCacheRecycler;
|
||||
import org.elasticsearch.common.util.PageCacheRecycler;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.breaker.CircuitBreakerService;
|
||||
|
@ -82,11 +84,19 @@ public class MockNode extends Node {
|
|||
}
|
||||
|
||||
@Override
|
||||
protected BigArrays createBigArrays(Settings settings, CircuitBreakerService circuitBreakerService) {
|
||||
protected BigArrays createBigArrays(PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService) {
|
||||
if (getPluginsService().filterPlugins(NodeMocksPlugin.class).isEmpty()) {
|
||||
return super.createBigArrays(settings, circuitBreakerService);
|
||||
return super.createBigArrays(pageCacheRecycler, circuitBreakerService);
|
||||
}
|
||||
return new MockBigArrays(settings, circuitBreakerService);
|
||||
return new MockBigArrays(pageCacheRecycler, circuitBreakerService);
|
||||
}
|
||||
|
||||
@Override
|
||||
PageCacheRecycler createPageCacheRecycler(Settings settings) {
|
||||
if (getPluginsService().filterPlugins(NodeMocksPlugin.class).isEmpty()) {
|
||||
return super.createPageCacheRecycler(settings);
|
||||
}
|
||||
return new MockPageCacheRecycler(settings);
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -38,6 +38,7 @@ import org.elasticsearch.common.lease.Releasables;
|
|||
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.MockBigArrays;
|
||||
import org.elasticsearch.common.util.MockPageCacheRecycler;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.cache.bitset.BitsetFilterCache;
|
||||
|
@ -112,7 +113,7 @@ public abstract class AggregatorTestCase extends ESTestCase {
|
|||
CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService();
|
||||
when(searchContext.aggregations())
|
||||
.thenReturn(new SearchContextAggregations(AggregatorFactories.EMPTY, bucketConsumer));
|
||||
when(searchContext.bigArrays()).thenReturn(new MockBigArrays(Settings.EMPTY, circuitBreakerService));
|
||||
when(searchContext.bigArrays()).thenReturn(new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), circuitBreakerService));
|
||||
// TODO: now just needed for top_hits, this will need to be revised for other agg unit tests:
|
||||
MapperService mapperService = mapperServiceMock();
|
||||
when(mapperService.getIndexSettings()).thenReturn(indexSettings);
|
||||
|
|
|
@ -2076,7 +2076,7 @@ public abstract class ESIntegTestCase extends ESTestCase {
try {
INSTANCE.printTestMessage("cleaning up after");
INSTANCE.afterInternal(true);
checkStaticState();
checkStaticState(true);
} finally {
INSTANCE = null;
}

@ -288,7 +288,7 @@ public abstract class ESTestCase extends LuceneTestCase {

@After
public final void after() throws Exception {
checkStaticState();
checkStaticState(false);
// We check threadContext != null rather than enableWarningsCheck()
// because after methods are still called in the event that before
// methods failed, in which case threadContext might not have been

@ -394,8 +394,10 @@ public abstract class ESTestCase extends LuceneTestCase {
}

// separate method so that this can be checked again after suite scoped cluster is shut down
protected static void checkStaticState() throws Exception {
MockPageCacheRecycler.ensureAllPagesAreReleased();
protected static void checkStaticState(boolean afterClass) throws Exception {
if (afterClass) {
MockPageCacheRecycler.ensureAllPagesAreReleased();
}
MockBigArrays.ensureAllArraysAreReleased();

// ensure no one changed the status logger level on us

@ -25,6 +25,7 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.MockBigArrays;
import org.elasticsearch.common.util.MockPageCacheRecycler;
import org.elasticsearch.common.xcontent.ContextParser;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ToXContent;

@ -236,7 +237,7 @@ public abstract class InternalAggregationTestCase<T extends InternalAggregation>
            toReduce.add(t);
        }
        ScriptService mockScriptService = mockScriptService();
        MockBigArrays bigArrays = new MockBigArrays(Settings.EMPTY, new NoneCircuitBreakerService());
        MockBigArrays bigArrays = new MockBigArrays(new MockPageCacheRecycler(Settings.EMPTY), new NoneCircuitBreakerService());
        if (randomBoolean() && toReduce.size() > 1) {
            // sometimes do an incremental reduce
            Collections.shuffle(toReduce, random());

@ -72,7 +72,7 @@ public class LongGCDisruption extends SingleNodeDisruption {
                assert isDisruptedNodeThread(currentThreadName) == false :
                    "current thread match pattern. thread name: " + currentThreadName + ", node: " + disruptedNode;
                // we spawn a background thread to protect against deadlock which can happen
                // if there are shared resources between caller thread and and suspended threads
                // if there are shared resources between caller thread and suspended threads
                // see unsafeClasses to how to avoid that
                final AtomicReference<Exception> suspendingError = new AtomicReference<>();
                final Thread suspendingThread = new Thread(new AbstractRunnable() {

@ -109,6 +109,10 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {

    protected abstract MockTransportService build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake);

    protected int channelsPerNodeConnection() {
        return 13;
    }

    @Override
    @Before
    public void setUp() throws Exception {

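`channelsPerNodeConnection()` is a new hook with a default of 13; `testAcceptedChannelCount()` in the next hunk asserts the transport's server-side channel stats against it. A sketch of how a transport-specific test suite might override the hook — the class name, channel count, and stubbed `build()` body are invented here for illustration:

```java
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.transport.MockTransportService;

// Hypothetical subclass, not part of this diff: a transport whose connections open a
// different number of channels reports that number so testAcceptedChannelCount() still passes.
public class SingleChannelTransportTests extends AbstractSimpleTransportTestCase {

    @Override
    protected MockTransportService build(Settings settings, Version version,
                                         ClusterSettings clusterSettings, boolean doHandshake) {
        // building the concrete transport is out of scope for this sketch
        throw new UnsupportedOperationException("illustrative sketch only");
    }

    @Override
    protected int channelsPerNodeConnection() {
        return 1; // this hypothetical transport multiplexes everything over one channel
    }
}
```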
@ -2345,6 +2349,24 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase {
        }
    }

    public void testAcceptedChannelCount() throws Exception {
        assertBusy(() -> {
            TransportStats transportStats = serviceA.transport.getStats();
            assertEquals(channelsPerNodeConnection(), transportStats.getServerOpen());
        });
        assertBusy(() -> {
            TransportStats transportStats = serviceB.transport.getStats();
            assertEquals(channelsPerNodeConnection(), transportStats.getServerOpen());
        });

        serviceA.close();

        assertBusy(() -> {
            TransportStats transportStats = serviceB.transport.getStats();
            assertEquals(0, transportStats.getServerOpen());
        });
    }

    public void testTransportStatsWithException() throws Exception {
        MockTransportService serviceC = build(Settings.builder().put("name", "TS_TEST").build(), version0, null, true);
        CountDownLatch receivedLatch = new CountDownLatch(1);

@ -217,11 +217,6 @@ public class MockTcpTransport extends TcpTransport {
            socket.setReuseAddress(TCP_REUSE_ADDRESS.get(settings));
        }

    @Override
    public long getNumOpenServerConnections() {
        return 1;
    }

    public final class MockChannel implements Closeable, TcpChannel {
        private final AtomicBoolean isOpen = new AtomicBoolean(true);
        private final InetSocketAddress localAddress;

@ -23,6 +23,7 @@ import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.Plugin;

@ -39,6 +40,7 @@ public class MockTcpTransportPlugin extends Plugin implements NetworkPlugin {

    @Override
    public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
                                                          PageCacheRecycler pageCacheRecycler,
                                                          CircuitBreakerService circuitBreakerService,
                                                          NamedWriteableRegistry namedWriteableRegistry,
                                                          NetworkService networkService) {

@ -19,9 +19,16 @@

package org.elasticsearch.transport.nio;

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.util.BigArrays;

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;

/**

@ -30,39 +37,56 @@ import java.util.function.Supplier;
 * the pages internally. If more space is needed at the end of the buffer {@link #ensureCapacity(long)} can
 * be called and the buffer will expand using the supplier provided.
 */
public final class InboundChannelBuffer {
public final class InboundChannelBuffer implements Releasable {

    private static final int PAGE_SIZE = 1 << 14;
    private static final int PAGE_SIZE = BigArrays.BYTE_PAGE_SIZE;
    private static final int PAGE_MASK = PAGE_SIZE - 1;
    private static final int PAGE_SHIFT = Integer.numberOfTrailingZeros(PAGE_SIZE);
    private static final ByteBuffer[] EMPTY_BYTE_BUFFER_ARRAY = new ByteBuffer[0];

    private final ArrayDeque<ByteBuffer> pages;
    private final Supplier<ByteBuffer> pageSupplier;
    private final ArrayDeque<Page> pages;
    private final Supplier<Page> pageSupplier;
    private final AtomicBoolean isClosed = new AtomicBoolean(false);

    private long capacity = 0;
    private long internalIndex = 0;
    // The offset is an int as it is the offset of where the bytes begin in the first buffer
    private int offset = 0;

    public InboundChannelBuffer() {
        this(() -> ByteBuffer.wrap(new byte[PAGE_SIZE]));
    }

    private InboundChannelBuffer(Supplier<ByteBuffer> pageSupplier) {
    public InboundChannelBuffer(Supplier<Page> pageSupplier) {
        this.pageSupplier = pageSupplier;
        this.pages = new ArrayDeque<>();
        this.capacity = PAGE_SIZE * pages.size();
        ensureCapacity(PAGE_SIZE);
    }

    @Override
    public void close() {
        if (isClosed.compareAndSet(false, true)) {
            Page page;
            List<RuntimeException> closingExceptions = new ArrayList<>();
            while ((page = pages.pollFirst()) != null) {
                try {
                    page.close();
                } catch (RuntimeException e) {
                    closingExceptions.add(e);
                }
            }
            ExceptionsHelper.rethrowAndSuppress(closingExceptions);
        }
    }

    public void ensureCapacity(long requiredCapacity) {
        if (isClosed.get()) {
            throw new IllegalStateException("Cannot allocate new pages if the buffer is closed.");
        }
        if (capacity < requiredCapacity) {
            int numPages = numPages(requiredCapacity + offset);
            int pagesToAdd = numPages - pages.size();
            for (int i = 0; i < pagesToAdd; i++) {
                pages.addLast(pageSupplier.get());
                Page page = pageSupplier.get();
                pages.addLast(page);
            }
            capacity += pagesToAdd * PAGE_SIZE;
        }

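A quick worked example of the paging arithmetic the class relies on: `PAGE_SIZE` is now `BigArrays.BYTE_PAGE_SIZE` (16 KiB), and `PAGE_MASK`/`PAGE_SHIFT` convert byte offsets into page coordinates with bit operations. The `numPages` and `pageIndex` bodies below are assumptions (ceiling and floor division by the page size, consistent with how `ensureCapacity` and the release path use them); `indexInPage` is copied from the last InboundChannelBuffer hunk further down.

```java
public final class PageMathSketch {

    static final int PAGE_SIZE = 1 << 14;                     // 16 KiB, i.e. BigArrays.BYTE_PAGE_SIZE
    static final int PAGE_MASK = PAGE_SIZE - 1;
    static final int PAGE_SHIFT = Integer.numberOfTrailingZeros(PAGE_SIZE);

    // Assumed ceiling division: how many pages are needed to hold the requested bytes.
    static int numPages(long requiredBytes) {
        return (int) ((requiredBytes + PAGE_MASK) >>> PAGE_SHIFT);
    }

    // Assumed floor division: which page a given byte index falls into.
    static int pageIndex(long index) {
        return (int) (index >>> PAGE_SHIFT);
    }

    // Same body as indexInPage(long) in the class itself.
    static int indexInPage(long index) {
        return (int) (index & PAGE_MASK);
    }

    public static void main(String[] args) {
        System.out.println(numPages(40_000));    // 3    -> three 16 KiB pages give 49,152 bytes of capacity
        System.out.println(pageIndex(40_000));   // 2    -> byte 40,000 lives in the third page
        System.out.println(indexInPage(40_000)); // 7232 -> at that offset within the page
    }
}
```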
@ -81,7 +105,7 @@ public final class InboundChannelBuffer {

        int pagesToRelease = pageIndex(offset + bytesToRelease);
        for (int i = 0; i < pagesToRelease; i++) {
            pages.removeFirst();
            pages.removeFirst().close();
        }
        capacity -= bytesToRelease;
        internalIndex = Math.max(internalIndex - bytesToRelease, 0);

@ -112,12 +136,12 @@ public final class InboundChannelBuffer {
        }

        ByteBuffer[] buffers = new ByteBuffer[pageCount];
        Iterator<ByteBuffer> pageIterator = pages.iterator();
        ByteBuffer firstBuffer = pageIterator.next().duplicate();
        Iterator<Page> pageIterator = pages.iterator();
        ByteBuffer firstBuffer = pageIterator.next().byteBuffer.duplicate();
        firstBuffer.position(firstBuffer.position() + offset);
        buffers[0] = firstBuffer;
        for (int i = 1; i < buffers.length; i++) {
            buffers[i] = pageIterator.next().duplicate();
            buffers[i] = pageIterator.next().byteBuffer.duplicate();
        }
        if (finalLimit != 0) {
            buffers[buffers.length - 1].limit(finalLimit);

@ -148,11 +172,11 @@ public final class InboundChannelBuffer {
        int indexInPage = indexInPage(indexWithOffset);

        ByteBuffer[] buffers = new ByteBuffer[pages.size() - pageIndex];
        Iterator<ByteBuffer> pageIterator = pages.descendingIterator();
        Iterator<Page> pageIterator = pages.descendingIterator();
        for (int i = buffers.length - 1; i > 0; --i) {
            buffers[i] = pageIterator.next().duplicate();
            buffers[i] = pageIterator.next().byteBuffer.duplicate();
        }
        ByteBuffer firstPostIndexBuffer = pageIterator.next().duplicate();
        ByteBuffer firstPostIndexBuffer = pageIterator.next().byteBuffer.duplicate();
        firstPostIndexBuffer.position(firstPostIndexBuffer.position() + indexInPage);
        buffers[0] = firstPostIndexBuffer;

@ -201,4 +225,21 @@ public final class InboundChannelBuffer {
    private int indexInPage(long index) {
        return (int) (index & PAGE_MASK);
    }

    public static class Page implements Releasable {

        private final ByteBuffer byteBuffer;
        private final Releasable releasable;

        public Page(ByteBuffer byteBuffer, Releasable releasable) {
            this.byteBuffer = byteBuffer;
            this.releasable = releasable;
        }

        @Override
        public void close() {
            releasable.close();
        }

    }
}

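The `Page` wrapper is what makes the buffer releasable: each page carries the `Releasable` that owns its backing memory, and `close()`/`release` hand pages back to their source. A stand-alone sketch of that contract, using a plain heap-backed supplier instead of a `PageCacheRecycler` (the recycler-backed wiring appears verbatim in the NioTransport hunk further down); it assumes `Releasable` can be written as a lambda:

```java
import java.nio.ByteBuffer;
import java.util.function.Supplier;

import org.elasticsearch.transport.nio.InboundChannelBuffer;

public final class PageSupplierSketch {

    public static void main(String[] args) {
        // Each supplied Page pairs a 16 KiB buffer with the action that returns it to its pool.
        Supplier<InboundChannelBuffer.Page> pageSupplier = () -> {
            byte[] bytes = new byte[1 << 14];
            return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes),
                () -> { /* heap pages need no explicit release; a recycler would reclaim here */ });
        };

        InboundChannelBuffer buffer = new InboundChannelBuffer(pageSupplier);
        buffer.ensureCapacity(1 << 16); // grows by whole pages via the supplier
        buffer.close();                 // closes every outstanding Page exactly once
    }
}
```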
@ -34,17 +34,12 @@ public class NioShutdown {
        this.logger = logger;
    }

    void orderlyShutdown(OpenChannels openChannels, ArrayList<AcceptingSelector> acceptors, ArrayList<SocketSelector> socketSelectors) {

        // Start by closing the server channels. Once these are closed, we are guaranteed to no accept new connections
        openChannels.closeServerChannels();
    void orderlyShutdown(ArrayList<AcceptingSelector> acceptors, ArrayList<SocketSelector> socketSelectors) {

        for (AcceptingSelector acceptor : acceptors) {
            shutdownSelector(acceptor);
        }

        openChannels.close();

        for (SocketSelector selector : socketSelectors) {
            shutdownSelector(selector);
        }

@ -24,16 +24,17 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TcpTransport;
import org.elasticsearch.transport.Transports;
import org.elasticsearch.transport.nio.channel.NioChannel;
import org.elasticsearch.transport.nio.channel.NioServerSocketChannel;
import org.elasticsearch.transport.nio.channel.NioSocketChannel;
import org.elasticsearch.transport.nio.channel.TcpChannelFactory;

@ -44,6 +45,7 @@ import org.elasticsearch.transport.nio.channel.TcpWriteContext;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ThreadFactory;

@ -67,7 +69,7 @@ public class NioTransport extends TcpTransport {
    public static final Setting<Integer> NIO_ACCEPTOR_COUNT =
        intSetting("transport.nio.acceptor_count", 1, 1, Setting.Property.NodeScope);

    private final OpenChannels openChannels = new OpenChannels(logger);
    private final PageCacheRecycler pageCacheRecycler;
    private final ConcurrentMap<String, TcpChannelFactory> profileToChannelFactory = newConcurrentMap();
    private final ArrayList<AcceptingSelector> acceptors = new ArrayList<>();
    private final ArrayList<SocketSelector> socketSelectors = new ArrayList<>();

@ -76,31 +78,23 @@ public class NioTransport extends TcpTransport {
    private int acceptorNumber;

    public NioTransport(Settings settings, ThreadPool threadPool, NetworkService networkService, BigArrays bigArrays,
                        NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) {
                        PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry,
                        CircuitBreakerService circuitBreakerService) {
        super("nio", settings, threadPool, bigArrays, circuitBreakerService, namedWriteableRegistry, networkService);
    }

    @Override
    public long getNumOpenServerConnections() {
        return openChannels.serverChannelsCount();
        this.pageCacheRecycler = pageCacheRecycler;
    }

    @Override
    protected TcpNioServerSocketChannel bind(String name, InetSocketAddress address) throws IOException {
        TcpChannelFactory channelFactory = this.profileToChannelFactory.get(name);
        AcceptingSelector selector = acceptors.get(++acceptorNumber % NioTransport.NIO_ACCEPTOR_COUNT.get(settings));
        TcpNioServerSocketChannel serverChannel = channelFactory.openNioServerSocketChannel(address, selector);
        openChannels.serverChannelOpened(serverChannel);
        serverChannel.addCloseListener(ActionListener.wrap(() -> openChannels.channelClosed(serverChannel)));
        return serverChannel;
        return channelFactory.openNioServerSocketChannel(address, selector);
    }

    @Override
    protected TcpNioSocketChannel initiateChannel(DiscoveryNode node, TimeValue connectTimeout, ActionListener<Void> connectListener)
        throws IOException {
        TcpNioSocketChannel channel = clientChannelFactory.openNioChannel(node.getAddress().address(), clientSelectorSupplier.get());
        openChannels.clientChannelOpened(channel);
        channel.addCloseListener(ActionListener.wrap(() -> openChannels.channelClosed(channel)));
        channel.addConnectListener(connectListener);
        return channel;
    }

@ -169,7 +163,7 @@ public class NioTransport extends TcpTransport {
    @Override
    protected void stopInternal() {
        NioShutdown nioShutdown = new NioShutdown(logger);
        nioShutdown.orderlyShutdown(openChannels, acceptors, socketSelectors);
        nioShutdown.orderlyShutdown(acceptors, socketSelectors);

        profileToChannelFactory.clear();
        socketSelectors.clear();

@ -184,14 +178,18 @@ public class NioTransport extends TcpTransport {
    }

    private Consumer<NioSocketChannel> getContextSetter(String profileName) {
        return (c) -> c.setContexts(new TcpReadContext(c, new TcpReadHandler(profileName,this)), new TcpWriteContext(c),
            this::exceptionCaught);
        return (c) -> {
            Supplier<InboundChannelBuffer.Page> pageSupplier = () -> {
                Recycler.V<byte[]> bytes = pageCacheRecycler.bytePage(false);
                return new InboundChannelBuffer.Page(ByteBuffer.wrap(bytes.v()), bytes);
            };
            c.setContexts(new TcpReadContext(c, new TcpReadHandler(profileName, this), new InboundChannelBuffer(pageSupplier)),
                new TcpWriteContext(c), this::exceptionCaught);
        };
    }

    private void acceptChannel(NioSocketChannel channel) {
        TcpNioSocketChannel tcpChannel = (TcpNioSocketChannel) channel;
        openChannels.acceptedChannelOpened(tcpChannel);
        tcpChannel.addCloseListener(ActionListener.wrap(() -> openChannels.channelClosed(channel)));
        serverAcceptedChannel(tcpChannel);

    }

@ -22,6 +22,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.plugins.NetworkPlugin;
import org.elasticsearch.plugins.Plugin;

@ -38,6 +39,7 @@ public class NioTransportPlugin extends Plugin implements NetworkPlugin {

    @Override
    public Map<String, Supplier<Transport>> getTransports(Settings settings, ThreadPool threadPool, BigArrays bigArrays,
                                                          PageCacheRecycler pageCacheRecycler,
                                                          CircuitBreakerService circuitBreakerService,
                                                          NamedWriteableRegistry namedWriteableRegistry,
                                                          NetworkService networkService) {

@ -49,6 +51,7 @@ public class NioTransportPlugin extends Plugin implements NetworkPlugin {
            settings1 = settings;
        }
        return Collections.singletonMap(NIO_TRANSPORT_NAME,
            () -> new NioTransport(settings1, threadPool, networkService, bigArrays, namedWriteableRegistry, circuitBreakerService));
            () -> new NioTransport(settings1, threadPool, networkService, bigArrays, pageCacheRecycler, namedWriteableRegistry,
                circuitBreakerService));
    }
}

Some files were not shown because too many files have changed in this diff.