Merge branch 'master' into index-lifecycle

Colin Goodheart-Smithe 2018-05-15 09:59:55 +01:00
commit 8059aa7d55
No known key found for this signature in database
GPG Key ID: F975E7BDD739B3C7
208 changed files with 2881 additions and 1663 deletions


@@ -744,6 +744,7 @@ class BuildPlugin implements Plugin<Project> {
 additionalTest.testClassesDir = test.testClassesDir
 additionalTest.configure(commonTestConfig(project))
 additionalTest.configure(config)
+additionalTest.dependsOn(project.tasks.testClasses)
 test.dependsOn(additionalTest)
 });
 return test


@@ -225,6 +225,7 @@ public class RestTestsFromSnippetsTask extends SnippetsTask {
 * warning every time. */
 current.println(" - skip:")
 current.println(" features: ")
+current.println(" - default_shards")
 current.println(" - stash_in_key")
 current.println(" - stash_in_path")
 current.println(" - stash_path_replace")


@@ -18,27 +18,19 @@
  */
 package org.elasticsearch.client.benchmark.rest;
-import org.apache.http.HttpEntity;
 import org.apache.http.HttpHeaders;
 import org.apache.http.HttpHost;
 import org.apache.http.HttpStatus;
-import org.apache.http.client.config.RequestConfig;
-import org.apache.http.conn.ConnectionKeepAliveStrategy;
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.StringEntity;
-import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
 import org.apache.http.message.BasicHeader;
-import org.apache.http.nio.entity.NStringEntity;
 import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.RestClient;
-import org.elasticsearch.client.RestClientBuilder;
 import org.elasticsearch.client.benchmark.AbstractBenchmark;
 import org.elasticsearch.client.benchmark.ops.bulk.BulkRequestExecutor;
 import org.elasticsearch.client.benchmark.ops.search.SearchRequestExecutor;
 import java.io.IOException;
-import java.nio.charset.StandardCharsets;
 import java.util.Collections;
 import java.util.List;
 import java.util.Locale;

@@ -86,9 +78,10 @@ public final class RestClientBenchmark extends AbstractBenchmark<RestClient> {
 bulkRequestBody.append(bulkItem);
 bulkRequestBody.append("\n");
 }
-HttpEntity entity = new NStringEntity(bulkRequestBody.toString(), ContentType.APPLICATION_JSON);
+Request request = new Request("POST", "/geonames/type/_noop_bulk");
+request.setJsonEntity(bulkRequestBody.toString());
 try {
-Response response = client.performRequest("POST", "/geonames/type/_noop_bulk", Collections.emptyMap(), entity);
+Response response = client.performRequest(request);
 return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
 } catch (Exception e) {
 throw new ElasticsearchException(e);

@@ -107,9 +100,10 @@ public final class RestClientBenchmark extends AbstractBenchmark<RestClient> {
 @Override
 public boolean search(String source) {
-HttpEntity searchBody = new NStringEntity(source, StandardCharsets.UTF_8);
+Request request = new Request("GET", endpoint);
+request.setJsonEntity(source);
 try {
-Response response = client.performRequest("GET", endpoint, Collections.emptyMap(), searchBody);
+Response response = client.performRequest(request);
 return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
 } catch (IOException e) {
 throw new ElasticsearchException(e);
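Both hunks above make the same migration: the multi-argument `performRequest(method, endpoint, params, entity)` calls are replaced by a `Request` object that carries the method, endpoint, and body. A minimal sketch of the new pattern (the endpoint and body here are illustrative, not taken from the commit):

[source,java]
--------------------------------------------------
Request request = new Request("POST", "/my-index/_search");   // HTTP method and endpoint up front
request.setJsonEntity("{\"query\":{\"match_all\":{}}}");      // sets the entity and the JSON content type
Response response = client.performRequest(request);           // replaces the deprecated multi-argument overloads
boolean ok = response.getStatusLine().getStatusCode() == HttpStatus.SC_OK;
--------------------------------------------------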


@@ -194,18 +194,16 @@ public class BulkProcessorIT extends ESRestHighLevelClientTestCase {
 }
 public void testBulkProcessorConcurrentRequestsReadOnlyIndex() throws Exception {
-String createIndexBody = "{\n" +
+Request request = new Request("PUT", "/test-ro");
+request.setJsonEntity("{\n" +
 " \"settings\" : {\n" +
 " \"index\" : {\n" +
 " \"blocks.write\" : true\n" +
 " }\n" +
 " }\n" +
 " \n" +
-"}";
-NStringEntity entity = new NStringEntity(createIndexBody, ContentType.APPLICATION_JSON);
-Response response = client().performRequest("PUT", "/test-ro", Collections.emptyMap(), entity);
+"}");
+Response response = client().performRequest(request);
 assertThat(response.getStatusLine().getStatusCode(), equalTo(200));
 int bulkActions = randomIntBetween(10, 100);


@@ -19,9 +19,6 @@
 package org.elasticsearch.client;
-import org.apache.http.client.methods.HttpPut;
-import org.apache.http.entity.ContentType;
-import org.apache.http.entity.StringEntity;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ElasticsearchStatusException;
 import org.elasticsearch.action.DocWriteRequest;

@@ -39,6 +36,7 @@ import org.elasticsearch.action.get.MultiGetRequest;
 import org.elasticsearch.action.get.MultiGetResponse;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.index.IndexResponse;
+import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
 import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.common.Strings;

@@ -147,11 +145,10 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
 GetRequest getRequest = new GetRequest("index", "type", "id");
 assertFalse(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync));
 }
-String document = "{\"field1\":\"value1\",\"field2\":\"value2\"}";
-StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON);
-Response response = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id", Collections.singletonMap("refresh", "wait_for"),
-stringEntity);
-assertEquals(201, response.getStatusLine().getStatusCode());
+IndexRequest index = new IndexRequest("index", "type", "id");
+index.source("{\"field1\":\"value1\",\"field2\":\"value2\"}", XContentType.JSON);
+index.setRefreshPolicy(RefreshPolicy.IMMEDIATE);
+highLevelClient().index(index);
 {
 GetRequest getRequest = new GetRequest("index", "type", "id");
 assertTrue(execute(getRequest, highLevelClient()::exists, highLevelClient()::existsAsync));

@@ -175,12 +172,11 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
 assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]", exception.getMessage());
 assertEquals("index", exception.getMetadata("es.index").get(0));
 }
+IndexRequest index = new IndexRequest("index", "type", "id");
 String document = "{\"field1\":\"value1\",\"field2\":\"value2\"}";
-StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON);
-Response response = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id", Collections.singletonMap("refresh", "wait_for"),
-stringEntity);
-assertEquals(201, response.getStatusLine().getStatusCode());
+index.source(document, XContentType.JSON);
+index.setRefreshPolicy(RefreshPolicy.IMMEDIATE);
+highLevelClient().index(index);
 {
 GetRequest getRequest = new GetRequest("index", "type", "id").version(2);
 ElasticsearchException exception = expectThrows(ElasticsearchException.class,

@@ -271,18 +267,15 @@ public class CrudIT extends ESRestHighLevelClientTestCase {
 assertEquals("Elasticsearch exception [type=index_not_found_exception, reason=no such index]",
 response.getResponses()[1].getFailure().getFailure().getMessage());
 }
-String document = "{\"field\":\"value1\"}";
-StringEntity stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON);
-Response r = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id1", Collections.singletonMap("refresh", "true"),
-stringEntity);
-assertEquals(201, r.getStatusLine().getStatusCode());
-document = "{\"field\":\"value2\"}";
-stringEntity = new StringEntity(document, ContentType.APPLICATION_JSON);
-r = client().performRequest(HttpPut.METHOD_NAME, "/index/type/id2", Collections.singletonMap("refresh", "true"), stringEntity);
-assertEquals(201, r.getStatusLine().getStatusCode());
+BulkRequest bulk = new BulkRequest();
+bulk.setRefreshPolicy(RefreshPolicy.IMMEDIATE);
+IndexRequest index = new IndexRequest("index", "type", "id1");
+index.source("{\"field\":\"value1\"}", XContentType.JSON);
+bulk.add(index);
+index = new IndexRequest("index", "type", "id2");
+index.source("{\"field\":\"value2\"}", XContentType.JSON);
+bulk.add(index);
+highLevelClient().bulk(bulk);
 {
 MultiGetRequest multiGetRequest = new MultiGetRequest();
 multiGetRequest.add("index", "type", "id1");


@@ -312,14 +312,14 @@ public class SearchIT extends ESRestHighLevelClientTestCase {
 MatrixStats matrixStats = searchResponse.getAggregations().get("agg1");
 assertEquals(5, matrixStats.getFieldCount("num"));
 assertEquals(56d, matrixStats.getMean("num"), 0d);
-assertEquals(1830d, matrixStats.getVariance("num"), 0d);
-assertEquals(0.09340198804973046, matrixStats.getSkewness("num"), 0d);
+assertEquals(1830.0000000000002, matrixStats.getVariance("num"), 0d);
+assertEquals(0.09340198804973039, matrixStats.getSkewness("num"), 0d);
 assertEquals(1.2741646510794589, matrixStats.getKurtosis("num"), 0d);
 assertEquals(5, matrixStats.getFieldCount("num2"));
 assertEquals(29d, matrixStats.getMean("num2"), 0d);
 assertEquals(330d, matrixStats.getVariance("num2"), 0d);
 assertEquals(-0.13568039346585542, matrixStats.getSkewness("num2"), 1.0e-16);
-assertEquals(1.3517561983471074, matrixStats.getKurtosis("num2"), 0d);
+assertEquals(1.3517561983471071, matrixStats.getKurtosis("num2"), 0d);
 assertEquals(-767.5, matrixStats.getCovariance("num", "num2"), 0d);
 assertEquals(-0.9876336291667923, matrixStats.getCorrelation("num", "num2"), 0d);
 }


@@ -19,8 +19,6 @@
 package org.elasticsearch.client.documentation;
-import org.apache.http.HttpEntity;
-import org.apache.http.client.methods.HttpPost;
 import org.apache.http.entity.ContentType;
 import org.apache.http.nio.entity.NStringEntity;
 import org.elasticsearch.ElasticsearchException;

@@ -49,6 +47,7 @@ import org.elasticsearch.action.support.replication.ReplicationResponse;
 import org.elasticsearch.action.update.UpdateRequest;
 import org.elasticsearch.action.update.UpdateResponse;
 import org.elasticsearch.client.ESRestHighLevelClientTestCase;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.RestHighLevelClient;
 import org.elasticsearch.common.Strings;

@@ -58,6 +57,7 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
 import org.elasticsearch.index.VersionType;
 import org.elasticsearch.index.get.GetResult;
 import org.elasticsearch.rest.RestStatus;

@@ -271,16 +271,15 @@ public class CRUDDocumentationIT extends ESRestHighLevelClientTestCase {
 IndexResponse indexResponse = client.index(indexRequest);
 assertSame(indexResponse.status(), RestStatus.CREATED);
-XContentType xContentType = XContentType.JSON;
-String script = Strings.toString(XContentBuilder.builder(xContentType.xContent())
+Request request = new Request("POST", "/_scripts/increment-field");
+request.setJsonEntity(Strings.toString(JsonXContent.contentBuilder()
 .startObject()
 .startObject("script")
 .field("lang", "painless")
 .field("code", "ctx._source.field += params.count")
 .endObject()
-.endObject());
-HttpEntity body = new NStringEntity(script, ContentType.create(xContentType.mediaType()));
-Response response = client().performRequest(HttpPost.METHOD_NAME, "/_scripts/increment-field", emptyMap(), body);
+.endObject()));
+Response response = client().performRequest(request);
 assertEquals(response.getStatusLine().getStatusCode(), RestStatus.OK.getStatus());
 }
 {


@@ -30,6 +30,7 @@ import org.elasticsearch.action.get.GetRequest;
 import org.elasticsearch.action.index.IndexRequest;
 import org.elasticsearch.action.index.IndexResponse;
 import org.elasticsearch.client.ESRestHighLevelClientTestCase;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
 import org.elasticsearch.client.RestHighLevelClient;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;

@@ -66,58 +67,22 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF
 * --------------------------------------------------
 */
 public class MigrationDocumentationIT extends ESRestHighLevelClientTestCase {
-public void testCreateIndex() throws IOException {
-RestHighLevelClient client = highLevelClient();
-{
-//tag::migration-create-index
-Settings indexSettings = Settings.builder() // <1>
-.put(SETTING_NUMBER_OF_SHARDS, 1)
-.put(SETTING_NUMBER_OF_REPLICAS, 0)
-.build();
-String payload = Strings.toString(XContentFactory.jsonBuilder() // <2>
-.startObject()
-.startObject("settings") // <3>
-.value(indexSettings)
-.endObject()
-.startObject("mappings") // <4>
-.startObject("doc")
-.startObject("properties")
-.startObject("time")
-.field("type", "date")
-.endObject()
-.endObject()
-.endObject()
-.endObject()
-.endObject());
-HttpEntity entity = new NStringEntity(payload, ContentType.APPLICATION_JSON); // <5>
-Response response = client.getLowLevelClient().performRequest("PUT", "my-index", emptyMap(), entity); // <6>
-if (response.getStatusLine().getStatusCode() != HttpStatus.SC_OK) {
-// <7>
-}
-//end::migration-create-index
-assertEquals(200, response.getStatusLine().getStatusCode());
-}
-}
 public void testClusterHealth() throws IOException {
 RestHighLevelClient client = highLevelClient();
 {
 //tag::migration-cluster-health
-Map<String, String> parameters = singletonMap("wait_for_status", "green");
-Response response = client.getLowLevelClient().performRequest("GET", "/_cluster/health", parameters); // <1>
+Request request = new Request("GET", "/_cluster/health");
+request.addParameter("wait_for_status", "green"); // <1>
+Response response = client.getLowLevelClient().performRequest(request); // <2>
 ClusterHealthStatus healthStatus;
-try (InputStream is = response.getEntity().getContent()) { // <2>
-Map<String, Object> map = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); // <3>
-healthStatus = ClusterHealthStatus.fromString((String) map.get("status")); // <4>
+try (InputStream is = response.getEntity().getContent()) { // <3>
+Map<String, Object> map = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); // <4>
+healthStatus = ClusterHealthStatus.fromString((String) map.get("status")); // <5>
 }
-if (healthStatus == ClusterHealthStatus.GREEN) {
-// <5>
+if (healthStatus != ClusterHealthStatus.GREEN) {
+// <6>
 }
 //end::migration-cluster-health
 assertSame(ClusterHealthStatus.GREEN, healthStatus);


@@ -800,7 +800,7 @@ public class SearchDocumentationIT extends ESRestHighLevelClientTestCase {
 double qualityLevel = evalQuality.getQualityLevel(); // <3>
 assertEquals(1.0 / 3.0, qualityLevel, 0.0);
 List<RatedSearchHit> hitsAndRatings = evalQuality.getHitsAndRatings();
-RatedSearchHit ratedSearchHit = hitsAndRatings.get(0);
+RatedSearchHit ratedSearchHit = hitsAndRatings.get(2);
 assertEquals("3", ratedSearchHit.getSearchHit().getId()); // <4>
 assertFalse(ratedSearchHit.getRating().isPresent()); // <5>
 MetricDetail metricDetails = evalQuality.getMetricDetails();


@@ -26,13 +26,16 @@ import java.util.Map;
 import org.apache.http.Header;
 import org.apache.http.HttpEntity;
+import org.apache.http.entity.ByteArrayEntity;
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
 import org.apache.http.message.BasicHeader;
 import org.apache.http.nio.entity.NStringEntity;
+import org.elasticsearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;

@@ -151,6 +154,103 @@ public class RequestTests extends RestClientTestCase {
 assertArrayEquals(headers, request.getHeaders());
 }
-// TODO equals and hashcode
+public void testEqualsAndHashCode() {
+    Request request = randomRequest();
+    assertEquals(request, request);
+    Request copy = copy(request);
+    assertEquals(request, copy);
+    assertEquals(copy, request);
+    assertEquals(request.hashCode(), copy.hashCode());
+    Request mutant = mutate(request);
+    assertNotEquals(request, mutant);
+    assertNotEquals(mutant, request);
+}
+private Request randomRequest() {
+    Request request = new Request(
+        randomFrom(new String[] {"GET", "PUT", "DELETE", "POST", "HEAD", "OPTIONS"}),
+        randomAsciiAlphanumOfLength(5));
+    int parameterCount = between(0, 5);
+    for (int i = 0; i < parameterCount; i++) {
+        request.addParameter(randomAsciiAlphanumOfLength(i), randomAsciiLettersOfLength(3));
+    }
+    if (randomBoolean()) {
+        if (randomBoolean()) {
+            request.setJsonEntity(randomAsciiAlphanumOfLength(10));
+        } else {
+            request.setEntity(randomFrom(new HttpEntity[] {
+                new StringEntity(randomAsciiAlphanumOfLength(10), ContentType.APPLICATION_JSON),
+                new NStringEntity(randomAsciiAlphanumOfLength(10), ContentType.APPLICATION_JSON),
+                new ByteArrayEntity(randomBytesOfLength(40), ContentType.APPLICATION_JSON)
+            }));
+        }
+    }
+    if (randomBoolean()) {
+        int headerCount = between(1, 5);
+        Header[] headers = new Header[headerCount];
+        for (int i = 0; i < headerCount; i++) {
+            headers[i] = new BasicHeader(randomAsciiAlphanumOfLength(3), randomAsciiAlphanumOfLength(3));
+        }
+        request.setHeaders(headers);
+    }
+    if (randomBoolean()) {
+        request.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(1));
+    }
+    return request;
+}
+private Request copy(Request request) {
+    Request copy = new Request(request.getMethod(), request.getEndpoint());
+    copyMutables(request, copy);
+    return copy;
+}
+private Request mutate(Request request) {
+    if (randomBoolean()) {
+        // Mutate request or method but keep everything else constant
+        Request mutant = randomBoolean()
+            ? new Request(request.getMethod() + "m", request.getEndpoint())
+            : new Request(request.getMethod(), request.getEndpoint() + "m");
+        copyMutables(request, mutant);
+        return mutant;
+    }
+    Request mutant = copy(request);
+    int mutationType = between(0, 3);
+    switch (mutationType) {
+    case 0:
+        mutant.addParameter(randomAsciiAlphanumOfLength(mutant.getParameters().size() + 4), "extra");
+        return mutant;
+    case 1:
+        mutant.setJsonEntity("mutant"); // randomRequest can't produce this value
+        return mutant;
+    case 2:
+        if (mutant.getHeaders().length > 0) {
+            mutant.setHeaders(new Header[0]);
+        } else {
+            mutant.setHeaders(new BasicHeader("extra", "m"));
+        }
+        return mutant;
+    case 3:
+        mutant.setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(5));
+        return mutant;
+    default:
+        throw new UnsupportedOperationException("Unknown mutation type [" + mutationType + "]");
+    }
+}
+private void copyMutables(Request from, Request to) {
+    for (Map.Entry<String, String> param : from.getParameters().entrySet()) {
+        to.addParameter(param.getKey(), param.getValue());
+    }
+    to.setEntity(from.getEntity());
+    to.setHeaders(from.getHeaders());
+    to.setHttpAsyncResponseConsumerFactory(from.getHttpAsyncResponseConsumerFactory());
+}
 }


@@ -351,11 +351,12 @@ public class RestClientSingleHostIntegTests extends RestClientTestCase {
 private Response bodyTest(final RestClient restClient, final String method) throws IOException {
 String requestBody = "{ \"field\": \"value\" }";
-StringEntity entity = new StringEntity(requestBody, ContentType.APPLICATION_JSON);
 int statusCode = randomStatusCode(getRandom());
+Request request = new Request(method, "/" + statusCode);
+request.setJsonEntity(requestBody);
 Response esResponse;
 try {
-esResponse = restClient.performRequest(method, "/" + statusCode, Collections.<String, String>emptyMap(), entity);
+esResponse = restClient.performRequest(request);
 } catch(ResponseException e) {
 esResponse = e.getResponse();
 }


@@ -58,11 +58,9 @@ import java.net.SocketTimeoutException;
 import java.net.URI;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
-import java.util.TreeMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;


@@ -96,7 +96,7 @@ public class RestClientTests extends RestClientTestCase {
 }
 /**
- * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testSetParameters()}.
+ * @deprecated will remove method in 7.0 but needs tests until then. Replaced by {@link RequestTests#testAddParameters()}.
 */
 @Deprecated
 public void testPerformOldStyleAsyncWithNullParams() throws Exception {


@@ -21,18 +21,22 @@ package org.elasticsearch.test.rest;
 import org.apache.http.entity.ContentType;
 import org.apache.http.entity.StringEntity;
+import org.elasticsearch.client.Request;
 import org.elasticsearch.client.Response;
+import org.junit.Before;
 import java.io.IOException;
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.singletonMap;
+import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.startsWith;
 /**
 * Tests for the "Location" header returned when returning {@code 201 CREATED}.
 */
 public class CreatedLocationHeaderIT extends ESRestTestCase {
 public void testCreate() throws IOException {
 locationTestCase("PUT", "test/test/1");
 }

@@ -54,8 +58,11 @@ public class CreatedLocationHeaderIT extends ESRestTestCase {
 private void locationTestCase(String method, String url) throws IOException {
 locationTestCase(client().performRequest(method, url, emptyMap(),
 new StringEntity("{\"test\": \"test\"}", ContentType.APPLICATION_JSON)));
+// we have to delete the index otherwise the second indexing request will route to the single shard and not produce a 201
+final Response response = client().performRequest(new Request("DELETE", "test"));
+assertThat(response.getStatusLine().getStatusCode(), equalTo(200));
 locationTestCase(client().performRequest(method, url + "?routing=cat", emptyMap(),
 new StringEntity("{\"test\": \"test\"}", ContentType.APPLICATION_JSON)));
 }
 private void locationTestCase(Response response) throws IOException {

@@ -65,4 +72,5 @@ public class CreatedLocationHeaderIT extends ESRestTestCase {
 Response getResponse = client().performRequest("GET", location);
 assertEquals(singletonMap("test", "test"), entityAsMap(getResponse).get("_source"));
 }
 }


@@ -1,257 +0,0 @@
-[[es-release-notes]]
-= {es} Release Notes
-[partintro]
---
-// To add a release, copy and paste the template text
-// and add a link to the new section. Note that release subheads must
-// be floated and sections cannot be empty.
-// Use these for links to issue and pulls. Note issues and pulls redirect one to
-// each other on Github, so don't worry too much on using the right prefix.
-:issue: https://github.com/elastic/elasticsearch/issues/
-:pull: https://github.com/elastic/elasticsearch/pull/
-This section summarizes the changes in each release.
-* <<release-notes-7.0.0>>
-* <<release-notes-6.4.0>>
-* <<release-notes-6.3.1>>
---
-////
-// To add a release, copy and paste the following text, uncomment the relevant
-// sections, and add a link to the new section in the list of releases at the
-// top of the page. Note that release subheads must be floated and sections
-// cannot be empty.
-// TEMPLATE:
-// [[release-notes-n.n.n]]
-// == {es} n.n.n
-//[float]
-[[breaking-n.n.n]]
-//=== Breaking Changes
-//[float]
-//=== Breaking Java Changes
-//[float]
-//=== Deprecations
-//[float]
-//=== New Features
-//[float]
-//=== Enhancements
-//[float]
-//=== Bug Fixes
-//[float]
-//=== Regressions
-//[float]
-//=== Known Issues
-////
-[[release-notes-7.0.0]]
-== {es} 7.0.0
-coming[7.0.0]
-[float]
-[[breaking-7.0.0]]
-=== Breaking Changes
-<<write-thread-pool-fallback, Removed `thread_pool.bulk.*` settings and
-`es.thread_pool.write.use_bulk_as_display_name` system property>> ({pull}29609[#29609])
-<<systemd-service-file-config, Systemd service file is no longer marked as configuration>> ({pull}29004[#29004])
-<<remove-suggest-metric, Removed `suggest` metric on stats APIs>> ({pull}29635[#29635])
-<<remove-field-caps-body, In field capabilities APIs, removed support for providing fields in the request body>> ({pull}30185[#30185])
-Machine Learning::
-* The `max_running_jobs` node property is removed in this release. Use the
-`xpack.ml.max_open_jobs` setting instead. For more information, see <<ml-settings>>.
-* <<remove-http-enabled, Removed `http.enabled` setting>> ({pull}29601[#29601])
-//[float]
-//=== Breaking Java Changes
-[float]
-=== Deprecations
-Monitoring::
-* The `xpack.monitoring.collection.interval` setting can no longer be set to `-1`
-to disable monitoring data collection. Use `xpack.monitoring.collection.enabled`
-and set it to `false` (its default), which was added in 6.3.0.
-Security::
-* The fields returned as part of the mappings section by get index, get
-mappings, get field mappings, and field capabilities API are now only the
-ones that the user is authorized to access in case field level security is enabled.
-//[float]
-//=== New Features
-//[float]
-//=== Enhancements
-[float]
-=== Bug Fixes
-Use date format in `date_range` mapping before fallback to default ({pull}29310[#29310])
-Fix NPE in 'more_like_this' when field has zero tokens ({pull}30365[#30365])
-Fixed prerelease version of elasticsearch in the `deb` package to sort before GA versions
-({pull}29000[#29000])
-Rollup::
-* Validate timezone in range queries to ensure they match the selected job when
-searching ({pull}30338[#30338])
-SQL::
-* Fix parsing of Dates containing milliseconds ({pull}30419[#30419])
-[float]
-=== Regressions
-Fail snapshot operations early when creating or deleting a snapshot on a repository that has been
-written to by an older Elasticsearch after writing to it with a newer Elasticsearch version. ({pull}30140[#30140])
-Fix NPE when CumulativeSum agg encounters null value/empty bucket ({pull}29641[#29641])
-Do not fail snapshot when deleting a missing snapshotted file ({pull}30332[#30332])
-//[float]
-//=== Regressions
-//[float]
-//=== Known Issues
-[[release-notes-6.4.0]]
-== {es} 6.4.0
-coming[6.4.0]
-//[float]
-[[breaking-6.4.0]]
-//=== Breaking Changes
-//[float]
-//=== Breaking Java Changes
-[float]
-=== Deprecations
-Deprecated multi-argument versions of the request methods in the RestClient.
-Prefer the "Request" object flavored methods. ({pull}30315[#30315])
-[float]
-=== New Features
-The new <<mapping-ignored-field,`_ignored`>> field allows to know which fields
-got ignored at index time because of the <<ignore-malformed,`ignore_malformed`>>
-option. ({pull}30140[#29658])
-A new analysis plugin called `analysis_nori` that exposes the Lucene Korean
-analysis module. ({pull}30397[#30397])
-[float]
-=== Enhancements
-{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow
-copying source settings on index resize operations] ({pull}30255[#30255], {pull}30404[#30404])
-Added new "Request" object flavored request methods in the RestClient. Prefer
-these instead of the multi-argument versions. ({pull}29623[#29623])
-Added `setJsonEntity` to `Request` object so it is marginally easier to send JSON. ({pull}30447[#30447])
-Watcher HTTP client used in watches now allows more parallel connections to the
-same endpoint and evicts long running connections. ({pull}30130[#30130])
-The cluster state listener to decide if watcher should be
-stopped/started/paused now runs far less code in an executor but is more
-synchronous and predictable. Also the trigger engine thread is only started on
-data nodes. And the Execute Watch API can be triggered regardless is watcher is
-started or stopped. ({pull}30118[#30118])
-Added put index template API to the high level rest client ({pull}30400[#30400])
-Add ability to filter coordinating-only nodes when interacting with cluster
-APIs. ({pull}30313[#30313])
-[float]
-=== Bug Fixes
-Use date format in `date_range` mapping before fallback to default ({pull}29310[#29310])
-Fix NPE in 'more_like_this' when field has zero tokens ({pull}30365[#30365])
-Do not ignore request analysis/similarity settings on index resize operations when the source index already contains such settings ({pull}30216[#30216])
-Fix NPE when CumulativeSum agg encounters null value/empty bucket ({pull}29641[#29641])
-Machine Learning::
-* Account for gaps in data counts after job is reopened ({pull}30294[#30294])
-Add validation that geohashes are not empty and don't contain unsupported characters ({pull}30376[#30376])
-Rollup::
-* Validate timezone in range queries to ensure they match the selected job when
-searching ({pull}30338[#30338])
-SQL::
-* Fix parsing of Dates containing milliseconds ({pull}30419[#30419])
-Allocation::
-Auto-expand replicas when adding or removing nodes to prevent shard copies from
-being dropped and resynced when a data node rejoins the cluster ({pull}30423[#30423])
-//[float]
-//=== Regressions
-//[float]
-//=== Known Issues
-[[release-notes-6.3.1]]
-== Elasticsearch version 6.3.1
-coming[6.3.1]
-//[float]
-[[breaking-6.3.1]]
-//=== Breaking Changes
-//[float]
-//=== Breaking Java Changes
-//[float]
-//=== Deprecations
-//[float]
-//=== New Features
-//[float]
-//=== Enhancements
-[float]
-=== Bug Fixes
-Reduce the number of object allocations made by {security} when resolving the indices and aliases for a request ({pull}30180[#30180])
-Respect accept header on requests with no handler ({pull}30383[#30383])
-SQL::
-* Fix parsing of Dates containing milliseconds ({pull}30419[#30419])
-//[float]
-//=== Regressions
-//[float]
-//=== Known Issues


@@ -2,7 +2,7 @@
 == Migration Guide
 This section describes how to migrate existing code from the `TransportClient`
-to the new Java High Level REST Client released with the version 5.6.0
+to the Java High Level REST Client released with the version 5.6.0
 of Elasticsearch.
 === Motivations around a new Java client

@@ -107,9 +107,6 @@ More importantly, the high-level client:
 request constructors like `new IndexRequest()` to create requests
 objects. The requests are then executed using synchronous or
 asynchronous dedicated methods like `client.index()` or `client.indexAsync()`.
-- does not provide indices or cluster management APIs. Management
-operations can be executed by external scripts or
-<<java-rest-high-level-migration-manage-indices, using the low-level client>>.
 ==== How to migrate the way requests are built

@@ -241,71 +238,6 @@ returned by the cluster.
 <4> The `onFailure()` method is called when an error occurs
 during the execution of the request.
-[[java-rest-high-level-migration-manage-indices]]
-==== Manage Indices using the Low-Level REST Client
-The low-level client is able to execute any kind of HTTP requests, and can
-therefore be used to call the APIs that are not yet supported by the high level client.
-For example, creating a new index with the `TransportClient` may look like this:
-[source,java]
---------------------------------------------------
-Settings settings = Settings.builder() // <1>
-.put(SETTING_NUMBER_OF_SHARDS, 1)
-.put(SETTING_NUMBER_OF_REPLICAS, 0)
-.build();
-String mappings = XContentFactory.jsonBuilder() // <2>
-.startObject()
-.startObject("doc")
-.startObject("properties")
-.startObject("time")
-.field("type", "date")
-.endObject()
-.endObject()
-.endObject()
-.endObject()
-.string();
-CreateIndexResponse response = transportClient.admin().indices() // <3>
-.prepareCreate("my-index")
-.setSettings(indexSettings)
-.addMapping("doc", docMapping, XContentType.JSON)
-.get();
-if (response.isAcknowledged() == false) {
-// <4>
-}
---------------------------------------------------
-<1> Define the settings of the index
-<2> Define the mapping for document of type `doc` using a
-`XContentBuilder`
-<3> Create the index with the previous settings and mapping
-using the `prepareCreate()` method. The execution is synchronous
-and blocks on the `get()` method until the remote cluster returns
-a response.
-<4> Handle the situation where the index has not been created
-The same operation executed with the low-level client could be:
-["source","java",subs="attributes,callouts,macros"]
---------------------------------------------------
-include-tagged::{doc-tests}/MigrationDocumentationIT.java[migration-create-index]
---------------------------------------------------
-<1> Define the settings of the index
-<2> Define the body of the HTTP request using a `XContentBuilder` with JSON format
-<3> Include the settings in the request body
-<4> Include the mappings in the request body
-<5> Convert the request body from `String` to a `HttpEntity` and
-set its content type (here, JSON)
-<6> Execute the request using the low-level client. The execution is synchronous
-and blocks on the `performRequest()` method until the remote cluster returns
-a response. The low-level client can be retrieved from an existing `RestHighLevelClient`
-instance through the `getLowLevelClient` getter method.
-<7> Handle the situation where the index has not been created
 [[java-rest-high-level-migration-cluster-health]]
 ==== Checking Cluster Health using the Low-Level REST Client

@@ -331,18 +263,18 @@ With the low-level client, the code can be changed to:
 --------------------------------------------------
 include-tagged::{doc-tests}/MigrationDocumentationIT.java[migration-cluster-health]
 --------------------------------------------------
-<1> Call the cluster's health REST endpoint and wait for the cluster health to become green,
-then get back a `Response` object.
-<2> Retrieve an `InputStream` object in order to read the response's content
-<3> Parse the response's content using Elasticsearch's helper class `XContentHelper`. This
+<1> Set up the request to wait for the cluster's health to become green if it isn't already.
+<2> Make the request and get back a `Response` object.
+<3> Retrieve an `InputStream` object in order to read the response's content
+<4> Parse the response's content using Elasticsearch's helper class `XContentHelper`. This
 helper requires the content type of the response to be passed as an argument and returns
 a `Map` of objects. Values in the map can be of any type, including inner `Map` that are
 used to represent the JSON object hierarchy.
-<4> Retrieve the value of the `status` field in the response map, casts it as a a `String`
+<5> Retrieve the value of the `status` field in the response map, cast it as a `String`
 object and use the `ClusterHealthStatus.fromString()` method to convert it to a `ClusterHealthStatus`
 object. This method throws an exception if the value does not correspond to a valid cluster
 health status.
-<5> Handle the situation where the cluster's health is not green
+<6> Handle the situation where the cluster's health is not green
 Note that for convenience this example uses Elasticsearch's helpers to parse the JSON response
 body, but any other JSON parser could have been used instead.
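For reference, the snippet pulled in by `include-tagged::` here is the one updated in `MigrationDocumentationIT` earlier in this commit; reassembled, the callouts line up as follows:

[source,java]
--------------------------------------------------
Request request = new Request("GET", "/_cluster/health");
request.addParameter("wait_for_status", "green"); // <1>
Response response = client.getLowLevelClient().performRequest(request); // <2>
ClusterHealthStatus healthStatus;
try (InputStream is = response.getEntity().getContent()) { // <3>
    Map<String, Object> map = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); // <4>
    healthStatus = ClusterHealthStatus.fromString((String) map.get("status")); // <5>
}
if (healthStatus != ClusterHealthStatus.GREEN) {
    // <6>
}
--------------------------------------------------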


@@ -137,8 +137,8 @@ Possible response:
 "took": 25,
 "timed_out": false,
 "_shards": {
-"total": 5,
-"successful": 5,
+"total": 1,
+"successful": 1,
 "skipped" : 0,
 "failed": 0
 },


@@ -60,8 +60,8 @@ The response for the above aggregation:
 "aggregations": {
 "centroid": {
 "location": {
-"lat": 51.00982963806018,
-"lon": 3.9662131061777472
+"lat": 51.009829603135586,
+"lon": 3.9662130642682314
 },
 "count": 6
 }

@@ -113,8 +113,8 @@ The response for the above aggregation:
 "doc_count": 3,
 "centroid": {
 "location": {
-"lat": 52.371655656024814,
-"lon": 4.909563297405839
+"lat": 52.371655642054975,
+"lon": 4.9095632415264845
 },
 "count": 3
 }

@@ -125,7 +125,7 @@ The response for the above aggregation:
 "centroid": {
 "location": {
 "lat": 48.86055548675358,
-"lon": 2.3316944623366
+"lon": 2.331694420427084
 },
 "count": 2
 }


@@ -9,20 +9,6 @@ Input text is lowercased, normalized to remove extended characters, sorted,
 deduplicated and concatenated into a single token. If a stopword list is
 configured, stop words will also be removed.
-[float]
-=== Definition
-It consists of:
-Tokenizer::
-* <<analysis-standard-tokenizer,Standard Tokenizer>>
-Token Filters (in order)::
-1. <<analysis-lowercase-tokenfilter,Lower Case Token Filter>>
-2. <<analysis-asciifolding-tokenfilter>>
-3. <<analysis-stop-tokenfilter,Stop Token Filter>> (disabled by default)
-4. <<analysis-fingerprint-tokenfilter>>
 [float]
 === Example output

@@ -149,3 +135,46 @@ The above example produces the following term:
 ---------------------------
 [ consistent godel said sentence yes ]
 ---------------------------
+[float]
+=== Definition
+The `fingerprint` analyzer consists of:
+Tokenizer::
+* <<analysis-standard-tokenizer,Standard Tokenizer>>
+Token Filters (in order)::
+* <<analysis-lowercase-tokenfilter,Lower Case Token Filter>>
+* <<analysis-asciifolding-tokenfilter>>
+* <<analysis-stop-tokenfilter,Stop Token Filter>> (disabled by default)
+* <<analysis-fingerprint-tokenfilter>>
+If you need to customize the `fingerprint` analyzer beyond the configuration
+parameters then you need to recreate it as a `custom` analyzer and modify
+it, usually by adding token filters. This would recreate the built-in
+`fingerprint` analyzer and you can use it as a starting point for further
+customization:
+[source,js]
+----------------------------------------------------
+PUT /fingerprint_example
+{
+  "settings": {
+    "analysis": {
+      "analyzer": {
+        "rebuilt_fingerprint": {
+          "tokenizer": "standard",
+          "filter": [
+            "lowercase",
+            "asciifolding",
+            "fingerprint"
+          ]
+        }
+      }
+    }
+  }
+}
+----------------------------------------------------
+// CONSOLE
+// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: fingerprint_example, first: fingerprint, second: rebuilt_fingerprint}\nendyaml\n/]
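Once the `fingerprint_example` index above exists, a quick way to sanity-check the rebuilt analyzer is to run the example sentence through it with the `_analyze` API and compare the output against the built-in `fingerprint` analyzer (this request is a suggested check, not part of the commit):

[source,js]
----------------------------------------------------
GET /fingerprint_example/_analyze
{
  "analyzer": "rebuilt_fingerprint",
  "text": "Yes yes, Gödel said this sentence is consistent and."
}
----------------------------------------------------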


@@ -4,14 +4,6 @@
 The `keyword` analyzer is a ``noop'' analyzer which returns the entire input
 string as a single token.
-[float]
-=== Definition
-It consists of:
-Tokenizer::
-* <<analysis-keyword-tokenizer,Keyword Tokenizer>>
 [float]
 === Example output

@@ -57,3 +49,40 @@ The above sentence would produce the following single term:
 === Configuration
 The `keyword` analyzer is not configurable.
+[float]
+=== Definition
+The `keyword` analyzer consists of:
+Tokenizer::
+* <<analysis-keyword-tokenizer,Keyword Tokenizer>>
+If you need to customize the `keyword` analyzer then you need to
+recreate it as a `custom` analyzer and modify it, usually by adding
+token filters. Usually, you should prefer the
+<<keyword, Keyword type>> when you want strings that are not split
+into tokens, but just in case you need it, this would recreate the
+built-in `keyword` analyzer and you can use it as a starting point
+for further customization:
+[source,js]
+----------------------------------------------------
+PUT /keyword_example
+{
+  "settings": {
+    "analysis": {
+      "analyzer": {
+        "rebuilt_keyword": {
+          "tokenizer": "keyword",
+          "filter": [ <1>
+          ]
+        }
+      }
+    }
+  }
+}
+----------------------------------------------------
+// CONSOLE
+// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: keyword_example, first: keyword, second: rebuilt_keyword}\nendyaml\n/]
+<1> You'd add any token filters here.


@@ -19,19 +19,6 @@ Read more about http://www.regular-expressions.info/catastrophic.html[pathologic
 ========================================
-[float]
-=== Definition
-It consists of:
-Tokenizer::
-* <<analysis-pattern-tokenizer,Pattern Tokenizer>>
-Token Filters::
-* <<analysis-lowercase-tokenfilter,Lower Case Token Filter>>
-* <<analysis-stop-tokenfilter,Stop Token Filter>> (disabled by default)
 [float]
 === Example output

@@ -378,3 +365,51 @@ The regex above is easier to understand as:
 [\p{L}&&[^\p{Lu}]] # then lower case
 )
 --------------------------------------------------
+[float]
+=== Definition
+The `pattern` analyzer consists of:
+Tokenizer::
+* <<analysis-pattern-tokenizer,Pattern Tokenizer>>
+Token Filters::
+* <<analysis-lowercase-tokenfilter,Lower Case Token Filter>>
+* <<analysis-stop-tokenfilter,Stop Token Filter>> (disabled by default)
+If you need to customize the `pattern` analyzer beyond the configuration
+parameters then you need to recreate it as a `custom` analyzer and modify
+it, usually by adding token filters. This would recreate the built-in
+`pattern` analyzer and you can use it as a starting point for further
+customization:
+[source,js]
+----------------------------------------------------
+PUT /pattern_example
+{
+  "settings": {
+    "analysis": {
+      "tokenizer": {
+        "split_on_non_word": {
+          "type": "pattern",
+          "pattern": "\\W+" <1>
+        }
+      },
+      "analyzer": {
+        "rebuilt_pattern": {
+          "tokenizer": "split_on_non_word",
+          "filter": [
+            "lowercase" <2>
+          ]
+        }
+      }
+    }
+  }
+}
+----------------------------------------------------
+// CONSOLE
+// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: pattern_example, first: pattern, second: rebuilt_pattern}\nendyaml\n/]
+<1> The default pattern is `\W+` which splits on non-word characters
+and this is where you'd change it.
+<2> You'd add other token filters after `lowercase`.


@@ -4,14 +4,6 @@
 The `simple` analyzer breaks text into terms whenever it encounters a
 character which is not a letter. All terms are lower cased.
-[float]
-=== Definition
-It consists of:
-Tokenizer::
-* <<analysis-lowercase-tokenizer,Lower Case Tokenizer>>
 [float]
 === Example output

@@ -127,3 +119,37 @@ The above sentence would produce the following terms:
 === Configuration
 The `simple` analyzer is not configurable.
+[float]
+=== Definition
+The `simple` analyzer consists of:
+Tokenizer::
+* <<analysis-lowercase-tokenizer,Lower Case Tokenizer>>
+If you need to customize the `simple` analyzer then you need to recreate
+it as a `custom` analyzer and modify it, usually by adding token filters.
+This would recreate the built-in `simple` analyzer and you can use it as
+a starting point for further customization:
+[source,js]
+----------------------------------------------------
+PUT /simple_example
+{
+  "settings": {
+    "analysis": {
+      "analyzer": {
+        "rebuilt_simple": {
+          "tokenizer": "lowercase",
+          "filter": [ <1>
+          ]
+        }
+      }
+    }
+  }
+}
+----------------------------------------------------
+// CONSOLE
+// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: simple_example, first: simple, second: rebuilt_simple}\nendyaml\n/]
+<1> You'd add any token filters here.


@@ -7,19 +7,6 @@ Segmentation algorithm, as specified in
 http://unicode.org/reports/tr29/[Unicode Standard Annex #29]) and works well
 for most languages.
-[float]
-=== Definition
-It consists of:
-Tokenizer::
-* <<analysis-standard-tokenizer,Standard Tokenizer>>
-Token Filters::
-* <<analysis-standard-tokenfilter,Standard Token Filter>>
-* <<analysis-lowercase-tokenfilter,Lower Case Token Filter>>
-* <<analysis-stop-tokenfilter,Stop Token Filter>> (disabled by default)
 [float]
 === Example output

@@ -276,3 +263,44 @@ The above example produces the following terms:
 ---------------------------
 [ 2, quick, brown, foxes, jumpe, d, over, lazy, dog's, bone ]
 ---------------------------
+[float]
+=== Definition
+The `standard` analyzer consists of:
+Tokenizer::
+* <<analysis-standard-tokenizer,Standard Tokenizer>>
+Token Filters::
+* <<analysis-standard-tokenfilter,Standard Token Filter>>
+* <<analysis-lowercase-tokenfilter,Lower Case Token Filter>>
+* <<analysis-stop-tokenfilter,Stop Token Filter>> (disabled by default)
+If you need to customize the `standard` analyzer beyond the configuration
+parameters then you need to recreate it as a `custom` analyzer and modify
+it, usually by adding token filters. This would recreate the built-in
+`standard` analyzer and you can use it as a starting point:
+[source,js]
+----------------------------------------------------
+PUT /standard_example
+{
+  "settings": {
+    "analysis": {
+      "analyzer": {
+        "rebuilt_standard": {
+          "tokenizer": "standard",
+          "filter": [
+            "standard",
+            "lowercase" <1>
+          ]
+        }
+      }
+    }
+  }
+}
+----------------------------------------------------
+// CONSOLE
+// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: standard_example, first: standard, second: rebuilt_standard}\nendyaml\n/]
+<1> You'd add any token filters after `lowercase`.


@@ -5,17 +5,6 @@ The `stop` analyzer is the same as the <<analysis-simple-analyzer,`simple` analyzer>>
 but adds support for removing stop words. It defaults to using the
 `_english_` stop words.
-[float]
-=== Definition
-It consists of:
-Tokenizer::
-* <<analysis-lowercase-tokenizer,Lower Case Tokenizer>>
-Token filters::
-* <<analysis-stop-tokenfilter,Stop Token Filter>>
 [float]
 === Example output

@@ -239,3 +228,50 @@ The above example produces the following terms:
 ---------------------------
 [ quick, brown, foxes, jumped, lazy, dog, s, bone ]
 ---------------------------
+[float]
+=== Definition
+It consists of:
+Tokenizer::
+* <<analysis-lowercase-tokenizer,Lower Case Tokenizer>>
+Token filters::
+* <<analysis-stop-tokenfilter,Stop Token Filter>>
+If you need to customize the `stop` analyzer beyond the configuration
+parameters then you need to recreate it as a `custom` analyzer and modify
+it, usually by adding token filters. This would recreate the built-in
+`stop` analyzer and you can use it as a starting point for further
+customization:
+[source,js]
+----------------------------------------------------
+PUT /stop_example
+{
+  "settings": {
+    "analysis": {
+      "filter": {
+        "english_stop": {
+          "type": "stop",
+          "stopwords": "_english_" <1>
+        }
+      },
+      "analyzer": {
+        "rebuilt_stop": {
+          "tokenizer": "lowercase",
+          "filter": [
+            "english_stop" <2>
+          ]
+        }
+      }
+    }
+  }
+}
+----------------------------------------------------
+// CONSOLE
+// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: stop_example, first: stop, second: rebuilt_stop}\nendyaml\n/]
+<1> The default stopwords can be overridden with the `stopwords`
+or `stopwords_path` parameters.
+<2> You'd add any token filters after `english_stop`.


@ -4,14 +4,6 @@
The `whitespace` analyzer breaks text into terms whenever it encounters a The `whitespace` analyzer breaks text into terms whenever it encounters a
whitespace character. whitespace character.
[float]
=== Definition
It consists of:
Tokenizer::
* <<analysis-whitespace-tokenizer,Whitespace Tokenizer>>
[float] [float]
=== Example output === Example output
@ -120,3 +112,37 @@ The above sentence would produce the following terms:
=== Configuration === Configuration
The `whitespace` analyzer is not configurable. The `whitespace` analyzer is not configurable.
[float]
=== Definition
It consists of:
Tokenizer::
* <<analysis-whitespace-tokenizer,Whitespace Tokenizer>>
If you need to customize the `whitespace` analyzer, you need to recreate
it as a `custom` analyzer and modify it, usually by adding token filters.
The following example recreates the built-in `whitespace` analyzer; you
can use it as a starting point for further customization:
[source,js]
----------------------------------------------------
PUT /whitespace_example
{
"settings": {
"analysis": {
"analyzer": {
"rebuilt_whitespace": {
"tokenizer": "whitespace",
"filter": [ <1>
]
}
}
}
}
}
----------------------------------------------------
// CONSOLE
// TEST[s/\n$/\nstartyaml\n - compare_analyzers: {index: whitespace_example, first: whitespace, second: rebuilt_whitespace}\nendyaml\n/]
<1> You'd add any token filters here.
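Running the rebuilt analyzer through the `_analyze` API makes the behaviour
easy to see (a minimal sketch; the sample text is illustrative):

[source,js]
----------------------------------------------------
GET /whitespace_example/_analyze
{
  "analyzer": "rebuilt_whitespace",
  "text": "The QUICK Brown-Foxes"
}
----------------------------------------------------
// CONSOLE

Because the analyzer splits only on whitespace, this should return
`[ The, QUICK, Brown-Foxes ]`, with case and hyphens preserved.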


@ -235,8 +235,8 @@ The output from the above is:
"timed_out": false, "timed_out": false,
"took": $body.took, "took": $body.took,
"_shards": { "_shards": {
"total": 5, "total": 1,
"successful": 5, "successful": 1,
"skipped" : 0, "skipped" : 0,
"failed": 0 "failed": 0
}, },


@ -294,8 +294,8 @@ GET my_index/_search
"took": $body.took, "took": $body.took,
"timed_out": false, "timed_out": false,
"_shards": { "_shards": {
"total": 5, "total": 1,
"successful": 5, "successful": 1,
"skipped" : 0, "skipped" : 0,
"failed": 0 "failed": 0
}, },


@ -300,11 +300,7 @@ Responds:
"indices": { "indices": {
"twitter": { "twitter": {
"shards": { "shards": {
"0": [{"state": "STARTED"}, {"state": "UNASSIGNED"}], "0": [{"state": "STARTED"}, {"state": "UNASSIGNED"}]
"1": [{"state": "STARTED"}, {"state": "UNASSIGNED"}],
"2": [{"state": "STARTED"}, {"state": "UNASSIGNED"}],
"3": [{"state": "STARTED"}, {"state": "UNASSIGNED"}],
"4": [{"state": "STARTED"}, {"state": "UNASSIGNED"}]
} }
} }
} }


@ -16,7 +16,7 @@ Might respond with:
[source,txt]
--------------------------------------------------
shards disk.indices disk.used disk.avail disk.total disk.percent host ip node
- 5 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2
+ 1 260b 47.3gb 43.4gb 100.7gb 46 127.0.0.1 127.0.0.1 CSUXak2
--------------------------------------------------
// TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/ s/46/\\d+/]
// TESTRESPONSE[s/CSUXak2/.+/ _cat]


@ -14,7 +14,7 @@ GET /_cat/health?v
[source,txt]
--------------------------------------------------
epoch timestamp cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent
- 1475871424 16:17:04 elasticsearch green 1 1 5 5 0 0 0 0 - 100.0%
+ 1475871424 16:17:04 elasticsearch green 1 1 1 1 0 0 0 0 - 100.0%
--------------------------------------------------
// TESTRESPONSE[s/1475871424 16:17:04/\\d+ \\d+:\\d+:\\d+/]
// TESTRESPONSE[s/elasticsearch/[^ ]+/ s/0 -/\\d+ (-|\\d+(\\.\\d+)?[ms]+)/ _cat]
@ -33,7 +33,7 @@ which looks like:
[source,txt]
--------------------------------------------------
cluster status node.total node.data shards pri relo init unassign pending_tasks max_task_wait_time active_shards_percent
- elasticsearch green 1 1 5 5 0 0 0 0 - 100.0%
+ elasticsearch green 1 1 1 1 0 0 0 0 - 100.0%
--------------------------------------------------
// TESTRESPONSE[s/elasticsearch/[^ ]+/ s/0 -/\\d+ (-|\\d+(\\.\\d+)?[ms]+)/ _cat]


@ -18,7 +18,7 @@ Might respond with:
--------------------------------------------------
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
yellow open twitter u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0 88.1kb 88.1kb
- green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 5 0 0 0 260b 260b
+ green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 1 0 0 0 260b 260b
--------------------------------------------------
// TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/]
// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ _cat]
@ -81,7 +81,7 @@ Which looks like:
--------------------------------------------------
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
yellow open twitter u8FNjxh8Rfy_awN11oDKYQ 1 1 1200 0 88.1kb 88.1kb
- green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 5 0 0 0 260b 260b
+ green open twitter2 nYFWZEO7TUiOjLQXBaYJpA 1 0 0 0 260b 260b
--------------------------------------------------
// TESTRESPONSE[s/\d+(\.\d+)?[tgmk]?b/\\d+(\\.\\d+)?[tgmk]?b/]
// TESTRESPONSE[s/u8FNjxh8Rfy_awN11oDKYQ|nYFWZEO7TUiOjLQXBaYJpA/.+/ _cat]


@ -17,8 +17,8 @@ might look like:
["source","txt",subs="attributes,callouts"] ["source","txt",subs="attributes,callouts"]
-------------------------------------------------- --------------------------------------------------
index shard prirep ip segment generation docs.count docs.deleted size size.memory committed searchable version compound index shard prirep ip segment generation docs.count docs.deleted size size.memory committed searchable version compound
test 4 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true test 0 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true
test1 4 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true test1 0 p 127.0.0.1 _0 0 1 0 3kb 2042 false true {lucene_version} true
-------------------------------------------------- --------------------------------------------------
// TESTRESPONSE[s/3kb/\\d+(\\.\\d+)?[mk]?b/ s/2042/\\d+/ _cat] // TESTRESPONSE[s/3kb/\\d+(\\.\\d+)?[mk]?b/ s/2042/\\d+/ _cat]


@ -3,7 +3,7 @@
The cluster health API allows you to get a very simple status on the health
of the cluster. For example, on a quiet single node cluster with a single index
- with 5 shards and one replica, this:
+ with one shard and one replica, this:
[source,js]
--------------------------------------------------
@ -22,11 +22,11 @@ Returns this:
"timed_out" : false, "timed_out" : false,
"number_of_nodes" : 1, "number_of_nodes" : 1,
"number_of_data_nodes" : 1, "number_of_data_nodes" : 1,
"active_primary_shards" : 5, "active_primary_shards" : 1,
"active_shards" : 5, "active_shards" : 1,
"relocating_shards" : 0, "relocating_shards" : 0,
"initializing_shards" : 0, "initializing_shards" : 0,
"unassigned_shards" : 5, "unassigned_shards" : 1,
"delayed_unassigned_shards": 0, "delayed_unassigned_shards": 0,
"number_of_pending_tasks" : 0, "number_of_pending_tasks" : 0,
"number_of_in_flight_fetch": 0, "number_of_in_flight_fetch": 0,


@ -95,7 +95,7 @@ Replication is important for two primary reasons:
To summarize, each index can be split into multiple shards. An index can also be replicated zero (meaning no replicas) or more times. Once replicated, each index will have primary shards (the original shards that were replicated from) and replica shards (the copies of the primary shards).
The number of shards and replicas can be defined per index at the time the index is created. After the index is created, you may change the number of replicas dynamically anytime but you cannot change the number of shards after-the-fact.
- By default, each index in Elasticsearch is allocated 5 primary shards and 1 replica which means that if you have at least two nodes in your cluster, your index will have 5 primary shards and another 5 replica shards (1 complete replica) for a total of 10 shards per index.
+ By default, each index in Elasticsearch is allocated one primary shard and one replica which means that if you have at least two nodes in your cluster, your index will have one primary shard and another replica shard (one complete replica) for a total of two shards per index.
NOTE: Each Elasticsearch shard is a Lucene index. There is a maximum number of documents you can have in a single Lucene index. As of https://issues.apache.org/jira/browse/LUCENE-5843[`LUCENE-5843`], the limit is `2,147,483,519` (= Integer.MAX_VALUE - 128) documents.
You can monitor shard sizes using the {ref}/cat-shards.html[`_cat/shards`] API.
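Both values can be set explicitly when an index is created. The following is
a minimal sketch (the index name and shard counts are purely illustrative):

[source,js]
--------------------------------------------------
PUT /my_index
{
  "settings": {
    "index.number_of_shards": 3,
    "index.number_of_replicas": 2
  }
}
--------------------------------------------------
// CONSOLE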
@ -366,11 +366,11 @@ And the response:
[source,txt]
--------------------------------------------------
health status index uuid pri rep docs.count docs.deleted store.size pri.store.size
- yellow open customer 95SQ4TSUT7mWBT7VNHH67A 5 1 0 0 260b 260b
+ yellow open customer 95SQ4TSUT7mWBT7VNHH67A 1 1 0 0 260b 260b
--------------------------------------------------
// TESTRESPONSE[s/95SQ4TSUT7mWBT7VNHH67A/.+/ s/260b/\\d+\\.?\\d?k?b/ _cat]
- The results of the second command tells us that we now have 1 index named customer and it has 5 primary shards and 1 replica (the defaults) and it contains 0 documents in it.
+ The results of the second command tell us that we now have one index named customer and it has one primary shard and one replica (the defaults) and it contains zero documents in it.
You might also notice that the customer index has a yellow health tagged to it. Recall from our previous discussion that yellow means that some replicas are not (yet) allocated. The reason this happens for this index is because Elasticsearch by default created one replica for this index. Since we only have one node running at the moment, that one replica cannot yet be allocated (for high availability) until a later point in time when another node joins the cluster. Once that replica gets allocated onto a second node, the health status for this index will turn to green.


@ -105,12 +105,13 @@
you index a document, it is indexed first on the primary shard, then
on all <<glossary-replica-shard,replicas>> of the primary shard.
+
- By default, an <<glossary-index,index>> has 5 primary shards. You can
- specify fewer or more primary shards to scale the number of
- <<glossary-document,documents>> that your index can handle.
+ By default, an <<glossary-index,index>> has one primary shard. You can specify
+ more primary shards to scale the number of <<glossary-document,documents>>
+ that your index can handle.
+
- You cannot change the number of primary shards in an index, once the
- index is created.
+ You cannot change the number of primary shards in an index, once the index is
+ created. However, an index can be split into a new index using the
+ <<indices-split-index, split API>>.
+
See also <<glossary-routing,routing>>


@ -78,31 +78,31 @@ GET index/_search
"took": 2, "took": 2,
"timed_out": false, "timed_out": false,
"_shards": { "_shards": {
"total": 5, "total": 1,
"successful": 5, "successful": 1,
"skipped" : 0, "skipped" : 0,
"failed": 0 "failed": 0
}, },
"hits": { "hits": {
"total": 2, "total": 2,
"max_score": 0.2876821, "max_score": 0.18232156,
"hits": [ "hits": [
{ {
"_index": "index", "_index": "index",
"_type": "_doc", "_type": "_doc",
"_id": "2", "_id": "1",
"_score": 0.2876821, "_score": 0.18232156,
"_source": { "_source": {
"body": "A pair of skis" "body": "Ski resort"
} }
}, },
{ {
"_index": "index", "_index": "index",
"_type": "_doc", "_type": "_doc",
"_id": "1", "_id": "2",
"_score": 0.2876821, "_score": 0.18232156,
"_source": { "_source": {
"body": "Ski resort" "body": "A pair of skis"
} }
} }
] ]
@ -136,20 +136,20 @@ GET index/_search
"took": 1, "took": 1,
"timed_out": false, "timed_out": false,
"_shards": { "_shards": {
"total": 5, "total": 1,
"successful": 5, "successful": 1,
"skipped" : 0, "skipped" : 0,
"failed": 0 "failed": 0
}, },
"hits": { "hits": {
"total": 1, "total": 1,
"max_score": 0.2876821, "max_score": 0.80259144,
"hits": [ "hits": [
{ {
"_index": "index", "_index": "index",
"_type": "_doc", "_type": "_doc",
"_id": "1", "_id": "1",
"_score": 0.2876821, "_score": 0.80259144,
"_source": { "_source": {
"body": "Ski resort" "body": "Ski resort"
} }
@ -193,20 +193,20 @@ GET index/_search
"took": 2, "took": 2,
"timed_out": false, "timed_out": false,
"_shards": { "_shards": {
"total": 5, "total": 1,
"successful": 5, "successful": 1,
"skipped" : 0, "skipped" : 0,
"failed": 0 "failed": 0
}, },
"hits": { "hits": {
"total": 1, "total": 1,
"max_score": 0.2876821, "max_score": 0.80259144,
"hits": [ "hits": [
{ {
"_index": "index", "_index": "index",
"_type": "_doc", "_type": "_doc",
"_id": "1", "_id": "1",
"_score": 0.2876821, "_score": 0.80259144,
"_source": { "_source": {
"body": "Ski resort" "body": "Ski resort"
} }


@ -103,9 +103,14 @@ specific index module:
`index.auto_expand_replicas`::
- Auto-expand the number of replicas based on the number of available nodes.
+ Auto-expand the number of replicas based on the number of data nodes in the cluster.
Set to a dash delimited lower and upper bound (e.g. `0-5`) or use `all`
for the upper bound (e.g. `0-all`). Defaults to `false` (i.e. disabled).
Note that the auto-expanded number of replicas does not take any other allocation
rules into account, such as <<allocation-awareness,shard allocation awareness>>,
<<shard-allocation-filtering,filtering>> or <<allocation-total-shards,total shards per node>>,
and this can lead to the cluster health becoming `YELLOW` if the applicable rules
prevent all the replicas from being allocated.
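For example, a minimal sketch of enabling auto-expansion on an existing index
(the index name is illustrative):

[source,js]
--------------------------------------------------
PUT /my_index/_settings
{
  "index.auto_expand_replicas": "0-all"
}
--------------------------------------------------
// CONSOLE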
`index.search.idle.after`::
How long a shard can not receive a search or get request until it's considered


@ -108,8 +108,8 @@ provide a command-line tool for this, `elasticsearch-translog`.
[WARNING]
The `elasticsearch-translog` tool should *not* be run while Elasticsearch is
- running, and you will permanently lose the documents that were contained only in
- the translog!
+ running. If you attempt to run this tool while Elasticsearch is running, you
+ will permanently lose the documents that were contained only in the translog!
In order to run the `elasticsearch-translog` tool, specify the `truncate`
subcommand as well as the directory for the corrupted translog with the `-d`


@ -5,4 +5,6 @@ include::testing.asciidoc[]
include::glossary.asciidoc[]
include::release-notes/highlights.asciidoc[]
include::{docdir}/../CHANGELOG.asciidoc[]


@ -106,11 +106,7 @@ which returns something similar to:
"num_docs" : 0 "num_docs" : 0
} }
} }
], ]
"1": ...,
"2": ...,
"3": ...,
"4": ...
} }
} }
} }
@ -120,10 +116,6 @@ which returns something similar to:
// TESTRESPONSE[s/"translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA"/"translog_uuid": $body.indices.twitter.shards.0.0.commit.user_data.translog_uuid/] // TESTRESPONSE[s/"translog_uuid" : "hnOG3xFcTDeoI_kvvvOdNA"/"translog_uuid": $body.indices.twitter.shards.0.0.commit.user_data.translog_uuid/]
// TESTRESPONSE[s/"history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ"/"history_uuid": $body.indices.twitter.shards.0.0.commit.user_data.history_uuid/] // TESTRESPONSE[s/"history_uuid" : "XP7KDJGiS1a2fHYiFL5TXQ"/"history_uuid": $body.indices.twitter.shards.0.0.commit.user_data.history_uuid/]
// TESTRESPONSE[s/"sync_id" : "AVvFY-071siAOuFGEO9P"/"sync_id": $body.indices.twitter.shards.0.0.commit.user_data.sync_id/] // TESTRESPONSE[s/"sync_id" : "AVvFY-071siAOuFGEO9P"/"sync_id": $body.indices.twitter.shards.0.0.commit.user_data.sync_id/]
// TESTRESPONSE[s/"1": \.\.\./"1": $body.indices.twitter.shards.1/]
// TESTRESPONSE[s/"2": \.\.\./"2": $body.indices.twitter.shards.2/]
// TESTRESPONSE[s/"3": \.\.\./"3": $body.indices.twitter.shards.3/]
// TESTRESPONSE[s/"4": \.\.\./"4": $body.indices.twitter.shards.4/]
<1> the `sync id` marker
[float]


@ -42,7 +42,7 @@ PUT /my_source_index/_settings
}
--------------------------------------------------
// CONSOLE
- // TEST[s/^/PUT my_source_index\n/]
+ // TEST[s/^/PUT my_source_index\n{"settings":{"index.number_of_shards":2}}\n/]
<1> Forces the relocation of a copy of each shard to the node with name
`shrink_node_name`. See <<shard-allocation-filtering>> for more options.
@ -119,7 +119,7 @@ POST my_source_index/_shrink/my_target_index?copy_settings=true
}
--------------------------------------------------
// CONSOLE
- // TEST[s/^/PUT my_source_index\n{"settings": {"index.blocks.write": true}}\n/]
+ // TEST[s/^/PUT my_source_index\n{"settings": {"index.number_of_shards":5,"index.blocks.write": true}}\n/]
<1> The number of shards in the target index. This must be a factor of the
number of shards in the source index.


@ -83,31 +83,31 @@ both index and query time.
"took": $body.took, "took": $body.took,
"timed_out": false, "timed_out": false,
"_shards": { "_shards": {
"total": 5, "total": 1,
"successful": 5, "successful": 1,
"skipped" : 0, "skipped" : 0,
"failed": 0 "failed": 0
}, },
"hits": { "hits": {
"total": 2, "total": 2,
"max_score": 0.2876821, "max_score": 0.47000363,
"hits": [ "hits": [
{ {
"_index": "index", "_index": "index",
"_type": "_doc", "_type": "_doc",
"_id": "2", "_id": "1",
"_score": 0.2876821, "_score": 0.47000363,
"_source": { "_source": {
"foo": "bar" "foo": "BÀR"
} }
}, },
{ {
"_index": "index", "_index": "index",
"_type": "_doc", "_type": "_doc",
"_id": "1", "_id": "2",
"_score": 0.2876821, "_score": 0.47000363,
"_source": { "_source": {
"foo": "BÀR" "foo": "bar"
} }
} }
] ]
@ -144,8 +144,8 @@ returns
"took": 43, "took": 43,
"timed_out": false, "timed_out": false,
"_shards": { "_shards": {
"total": 5, "total": 1,
"successful": 5, "successful": 1,
"skipped" : 0, "skipped" : 0,
"failed": 0 "failed": 0
}, },


@ -194,8 +194,8 @@ now returns matches from the new index:
"took": 3, "took": 3,
"timed_out": false, "timed_out": false,
"_shards": { "_shards": {
"total": 5, "total": 1,
"successful": 5, "successful": 1,
"skipped" : 0, "skipped" : 0,
"failed": 0 "failed": 0
}, },
@ -389,8 +389,8 @@ This results in a response like this:
"took": 6, "took": 6,
"timed_out": false, "timed_out": false,
"_shards": { "_shards": {
"total": 5, "total": 1,
"successful": 5, "successful": 1,
"skipped" : 0, "skipped" : 0,
"failed": 0 "failed": 0
}, },
@ -549,8 +549,8 @@ GET /my_queries1/_search
"took": 6, "took": 6,
"timed_out": false, "timed_out": false,
"_shards": { "_shards": {
"total": 5, "total": 1,
"successful": 5, "successful": 1,
"skipped": 0, "skipped": 0,
"failed": 0 "failed": 0
}, },


@ -329,3 +329,16 @@ and will not match any documents for this query. This can be useful when
querying multiple indexes which might have different mappings. When set to
`false` (the default value) the query will throw an exception if the field
is not mapped.
[float]
==== Notes on Precision
Geopoints have limited precision and are always rounded down at index time.
At query time, the upper boundaries of the bounding boxes are rounded down,
while the lower boundaries are rounded up. As a result, points along the
lower bounds (the bottom and left edges of the bounding box) might not make it
into the bounding box due to the rounding error. At the same time, points
along the upper bounds (the top and right edges) might be selected by the
query even if they are located slightly outside the edge. The rounding error
should be less than 4.20e-8 degrees on the latitude and less than 8.39e-8
degrees on the longitude, which translates to less than 1cm of error even at
the equator.
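For illustration, a typical bounding box query to which these precision notes
apply might look like the following sketch (the index and field names are
hypothetical):

[source,js]
--------------------------------------------------
GET /my_locations/_search
{
  "query": {
    "geo_bounding_box": {
      "pin.location": {
        "top_left": { "lat": 40.73, "lon": -74.1 },
        "bottom_right": { "lat": 40.01, "lon": -71.12 }
      }
    }
  }
}
--------------------------------------------------
// CONSOLE

Points within the rounding error of the bottom or left edge may be excluded,
and points just outside the top or right edge may be included.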


@ -83,8 +83,8 @@ The above request will yield the following response:
"took": 13, "took": 13,
"timed_out": false, "timed_out": false,
"_shards": { "_shards": {
"total": 5, "total": 1,
"successful": 5, "successful": 1,
"skipped" : 0, "skipped" : 0,
"failed": 0 "failed": 0
}, },
@ -227,8 +227,8 @@ GET /my-index/_search
"took": 13, "took": 13,
"timed_out": false, "timed_out": false,
"_shards": { "_shards": {
"total": 5, "total": 1,
"successful": 5, "successful": 1,
"skipped" : 0, "skipped" : 0,
"failed": 0 "failed": 0
}, },
@ -299,7 +299,7 @@ Index response:
"failed": 0 "failed": 0
}, },
"result": "created", "result": "created",
"_seq_no" : 0, "_seq_no" : 1,
"_primary_term" : 1 "_primary_term" : 1
} }
-------------------------------------------------- --------------------------------------------------
@ -407,8 +407,8 @@ This will yield the following response.
"took": 7, "took": 7,
"timed_out": false, "timed_out": false,
"_shards": { "_shards": {
"total": 5, "total": 1,
"successful": 5, "successful": 1,
"skipped" : 0, "skipped" : 0,
"failed": 0 "failed": 0
}, },
@ -512,8 +512,8 @@ The slightly different response:
"took": 13, "took": 13,
"timed_out": false, "timed_out": false,
"_shards": { "_shards": {
"total": 5, "total": 1,
"successful": 5, "successful": 1,
"skipped" : 0, "skipped" : 0,
"failed": 0 "failed": 0
}, },
@ -608,8 +608,8 @@ The above search request returns a response similar to this:
"took": 13, "took": 13,
"timed_out": false, "timed_out": false,
"_shards": { "_shards": {
"total": 5, "total": 1,
"successful": 5, "successful": 1,
"skipped" : 0, "skipped" : 0,
"failed": 0 "failed": 0
}, },


@ -68,20 +68,20 @@ Response:
"took": 13, "took": 13,
"timed_out": false, "timed_out": false,
"_shards": { "_shards": {
"total": 5, "total": 1,
"successful": 5, "successful": 1,
"skipped" : 0, "skipped" : 0,
"failed": 0 "failed": 0
}, },
"hits": { "hits": {
"total": 1, "total": 1,
"max_score": 0.5753642, "max_score": 0.87546873,
"hits": [ "hits": [
{ {
"_index": "my-index", "_index": "my-index",
"_type": "_doc", "_type": "_doc",
"_id": "2", "_id": "2",
"_score": 0.5753642, "_score": 0.87546873,
"_source": { "_source": {
"codes": ["def", "ghi"], "codes": ["def", "ghi"],
"required_matches": 2 "required_matches": 2


@ -0,0 +1,9 @@
[[release-highlights-7.0.0]]
== 7.0.0 release highlights
++++
<titleabbrev>7.0.0</titleabbrev>
++++
coming[7.0.0]
See also <<breaking-changes-7.0>> and <<release-notes-7.0.0>>.


@ -0,0 +1,13 @@
[[release-highlights]]
= {es} Release Highlights
[partintro]
--
This section summarizes the most important changes in each release. For the
full list, see <<es-release-notes>> and <<breaking-changes>>.
* <<release-highlights-7.0.0>>
--
include::highlights-7.0.0.asciidoc[]


@ -37,8 +37,8 @@ tweets from the `twitter` index for a certain user. The result is:
{
"count" : 1,
"_shards" : {
- "total" : 5,
- "successful" : 5,
+ "total" : 1,
+ "successful" : 1,
"skipped" : 0,
"failed" : 0
}


@ -18,7 +18,7 @@ Full example:
GET /twitter/_search_shards
--------------------------------------------------
// CONSOLE
- // TEST[s/^/PUT twitter\n/]
+ // TEST[s/^/PUT twitter\n{"settings":{"index.number_of_shards":5}}\n/]
This will yield the following result:
@ -103,7 +103,7 @@ And specifying the same request, this time with a routing value:
GET /twitter/_search_shards?routing=foo,bar
--------------------------------------------------
// CONSOLE
- // TEST[s/^/PUT twitter\n/]
+ // TEST[s/^/PUT twitter\n{"settings":{"index.number_of_shards":5}}\n/]
This will yield the following result:


@ -177,8 +177,8 @@ returns this response:
--------------------------------------------------
{
"_shards" : {
- "total" : 5,
- "successful" : 5,
+ "total" : 1,
+ "successful" : 1,
"skipped" : 0,
"failed" : 0
},
@ -251,8 +251,8 @@ Which should look like:
"took": 6, "took": 6,
"timed_out": false, "timed_out": false,
"_shards" : { "_shards" : {
"total" : 5, "total" : 1,
"successful" : 5, "successful" : 1,
"skipped" : 0, "skipped" : 0,
"failed" : 0 "failed" : 0
}, },


@ -218,8 +218,8 @@ Response:
{
"valid": true,
"_shards": {
- "total": 5,
- "successful": 5,
+ "total": 1,
+ "successful": 1,
"failed": 0
},
"explanations": [
@ -227,31 +227,7 @@ Response:
"index": "twitter", "index": "twitter",
"shard": 0, "shard": 0,
"valid": true, "valid": true,
"explanation": "user:kimchy~2" "explanation": "(user:kimchi)^0.8333333 user:kimchy"
},
{
"index": "twitter",
"shard": 1,
"valid": true,
"explanation": "user:kimchy~2"
},
{
"index": "twitter",
"shard": 2,
"valid": true,
"explanation": "user:kimchy~2"
},
{
"index": "twitter",
"shard": 3,
"valid": true,
"explanation": "(user:kimchi)^0.8333333"
},
{
"index": "twitter",
"shard": 4,
"valid": true,
"explanation": "user:kimchy"
} }
] ]
} }


@ -57,7 +57,7 @@ public class RankEvalSpec implements Writeable, ToXContentObject {
/** Default max number of requests. */
private static final int MAX_CONCURRENT_SEARCHES = 10;
/** optional: Templates to base test requests on */
- private Map<String, Script> templates = new HashMap<>();
+ private final Map<String, Script> templates = new HashMap<>();
public RankEvalSpec(List<RatedRequest> ratedRequests, EvaluationMetric metric, Collection<ScriptWithId> templates) {
this.metric = Objects.requireNonNull(metric, "Cannot evaluate ranking if no evaluation metric is provided.");
@ -68,8 +68,8 @@ public class RankEvalSpec implements Writeable, ToXContentObject {
this.ratedRequests = ratedRequests;
if (templates == null || templates.isEmpty()) {
for (RatedRequest request : ratedRequests) {
- if (request.getTestRequest() == null) {
- throw new IllegalStateException("Cannot evaluate ranking if neither template nor test request is "
+ if (request.getEvaluationRequest() == null) {
+ throw new IllegalStateException("Cannot evaluate ranking if neither template nor evaluation request is "
+ "provided. Seen for request id: " + request.getId());
}
}


@ -75,9 +75,12 @@ public class RatedRequest implements Writeable, ToXContentObject {
private final String id;
private final List<String> summaryFields;
private final List<RatedDocument> ratedDocs;
- // Search request to execute for this rated request. This can be null if template and corresponding parameters are supplied.
+ /**
* Search request to execute for this rated request. This can be null in
* case the query is supplied as a template with corresponding parameters
*/
@Nullable
- private SearchSourceBuilder testRequest;
+ private final SearchSourceBuilder evaluationRequest;
/**
* Map of parameters to use for filling a query template, can be used
* instead of providing testRequest.
@ -86,27 +89,49 @@ public class RatedRequest implements Writeable, ToXContentObject {
@Nullable
private String templateId;
- private RatedRequest(String id, List<RatedDocument> ratedDocs, SearchSourceBuilder testRequest,
+ /**
* Create a rated request with template ids and parameters.
*
* @param id a unique name for this rated request
* @param ratedDocs a list of document ratings
* @param params template parameters
* @param templateId a templare id
*/
public RatedRequest(String id, List<RatedDocument> ratedDocs, Map<String, Object> params,
String templateId) {
this(id, ratedDocs, null, params, templateId);
}
/**
* Create a rated request using a {@link SearchSourceBuilder} to define the
* evaluated query.
*
* @param id a unique name for this rated request
* @param ratedDocs a list of document ratings
* @param evaluatedQuery the query that is evaluated
*/
public RatedRequest(String id, List<RatedDocument> ratedDocs, SearchSourceBuilder evaluatedQuery) {
this(id, ratedDocs, evaluatedQuery, new HashMap<>(), null);
}
private RatedRequest(String id, List<RatedDocument> ratedDocs, SearchSourceBuilder evaluatedQuery,
Map<String, Object> params, String templateId) {
- if (params != null && (params.size() > 0 && testRequest != null)) {
- throw new IllegalArgumentException(
- "Ambiguous rated request: Set both, verbatim test request and test request "
- + "template parameters.");
+ if (params != null && (params.size() > 0 && evaluatedQuery != null)) {
+ throw new IllegalArgumentException(
+ "Ambiguous rated request: Set both, verbatim test request and test request " + "template parameters.");
}
- if (templateId != null && testRequest != null) {
- throw new IllegalArgumentException(
- "Ambiguous rated request: Set both, verbatim test request and test request "
- + "template parameters.");
+ if (templateId != null && evaluatedQuery != null) {
+ throw new IllegalArgumentException(
+ "Ambiguous rated request: Set both, verbatim test request and test request " + "template parameters.");
}
- if ((params == null || params.size() < 1) && testRequest == null) {
- throw new IllegalArgumentException(
- "Need to set at least test request or test request template parameters.");
+ if ((params == null || params.size() < 1) && evaluatedQuery == null) {
+ throw new IllegalArgumentException("Need to set at least test request or test request template parameters.");
}
if ((params != null && params.size() > 0) && templateId == null) {
- throw new IllegalArgumentException(
- "If template parameters are supplied need to set id of template to apply "
- + "them to too.");
+ throw new IllegalArgumentException("If template parameters are supplied need to set id of template to apply " + "them to too.");
}
validateEvaluatedQuery(evaluatedQuery);
// check that not two documents with same _index/id are specified
Set<DocumentKey> docKeys = new HashSet<>();
for (RatedDocument doc : ratedDocs) {
@ -118,7 +143,7 @@ public class RatedRequest implements Writeable, ToXContentObject {
}
this.id = id;
- this.testRequest = testRequest;
+ this.evaluationRequest = evaluatedQuery;
this.ratedDocs = new ArrayList<>(ratedDocs);
if (params != null) {
this.params = new HashMap<>(params);
@ -129,18 +154,30 @@ public class RatedRequest implements Writeable, ToXContentObject {
this.summaryFields = new ArrayList<>();
}
- public RatedRequest(String id, List<RatedDocument> ratedDocs, Map<String, Object> params,
- String templateId) {
- this(id, ratedDocs, null, params, templateId);
+ static void validateEvaluatedQuery(SearchSourceBuilder evaluationRequest) {
+ // ensure that testRequest, if set, does not contain aggregation, suggest or highlighting section
+ if (evaluationRequest != null) {
if (evaluationRequest.suggest() != null) {
throw new IllegalArgumentException("Query in rated requests should not contain a suggest section.");
}
if (evaluationRequest.aggregations() != null) {
throw new IllegalArgumentException("Query in rated requests should not contain aggregations.");
}
if (evaluationRequest.highlighter() != null) {
throw new IllegalArgumentException("Query in rated requests should not contain a highlighter section.");
}
if (evaluationRequest.explain() != null && evaluationRequest.explain()) {
throw new IllegalArgumentException("Query in rated requests should not use explain.");
}
if (evaluationRequest.profile()) {
throw new IllegalArgumentException("Query in rated requests should not use profile.");
}
}
}
- public RatedRequest(String id, List<RatedDocument> ratedDocs, SearchSourceBuilder testRequest) {
- this(id, ratedDocs, testRequest, new HashMap<>(), null);
- }
- public RatedRequest(StreamInput in) throws IOException {
+ RatedRequest(StreamInput in) throws IOException {
this.id = in.readString();
- testRequest = in.readOptionalWriteable(SearchSourceBuilder::new);
+ evaluationRequest = in.readOptionalWriteable(SearchSourceBuilder::new);
int intentSize = in.readInt();
ratedDocs = new ArrayList<>(intentSize);
@ -159,7 +196,7 @@ public class RatedRequest implements Writeable, ToXContentObject {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(id);
- out.writeOptionalWriteable(testRequest);
+ out.writeOptionalWriteable(evaluationRequest);
out.writeInt(ratedDocs.size());
for (RatedDocument ratedDoc : ratedDocs) {
@ -173,8 +210,8 @@ public class RatedRequest implements Writeable, ToXContentObject {
out.writeOptionalString(this.templateId);
}
- public SearchSourceBuilder getTestRequest() {
- return testRequest;
+ public SearchSourceBuilder getEvaluationRequest() {
+ return evaluationRequest;
}
/** return the user supplied request id */
@ -240,8 +277,8 @@ public class RatedRequest implements Writeable, ToXContentObject {
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(ID_FIELD.getPreferredName(), this.id);
- if (testRequest != null) {
- builder.field(REQUEST_FIELD.getPreferredName(), this.testRequest);
+ if (evaluationRequest != null) {
+ builder.field(REQUEST_FIELD.getPreferredName(), this.evaluationRequest);
}
builder.startArray(RATINGS_FIELD.getPreferredName());
for (RatedDocument doc : this.ratedDocs) {
@ -285,7 +322,7 @@ public class RatedRequest implements Writeable, ToXContentObject {
RatedRequest other = (RatedRequest) obj;
- return Objects.equals(id, other.id) && Objects.equals(testRequest, other.testRequest)
+ return Objects.equals(id, other.id) && Objects.equals(evaluationRequest, other.evaluationRequest)
&& Objects.equals(summaryFields, other.summaryFields)
&& Objects.equals(ratedDocs, other.ratedDocs)
&& Objects.equals(params, other.params)
@ -294,7 +331,7 @@ public class RatedRequest implements Writeable, ToXContentObject {
@Override
public final int hashCode() {
- return Objects.hash(id, testRequest, summaryFields, ratedDocs, params,
+ return Objects.hash(id, evaluationRequest, summaryFields, ratedDocs, params,
templateId);
}
}
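For context, this validation guards the queries sent to the module's
`_rank_eval` endpoint. A minimal sketch of a request that passes validation
(the index, document ids, ratings and metric parameters are illustrative):

[source,js]
--------------------------------------------------
GET /twitter/_rank_eval
{
  "requests": [
    {
      "id": "kimchy_query",
      "request": { "query": { "match": { "user": "kimchy" } } },
      "ratings": [ { "_index": "twitter", "_id": "0", "rating": 1 } ]
    }
  ],
  "metric": { "precision": { "k": 10 } }
}
--------------------------------------------------
// CONSOLE

The same request with an `aggs`, `suggest`, `highlight`, `explain` or
`profile` section in its `request` body would be rejected with one of the
`IllegalArgumentException` messages above.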


@ -52,6 +52,7 @@ import java.util.Map.Entry;
import java.util.concurrent.ConcurrentHashMap;
import static org.elasticsearch.common.xcontent.XContentHelper.createParser;
import static org.elasticsearch.index.rankeval.RatedRequest.validateEvaluatedQuery;
/**
* Instances of this class execute a collection of search intents (read: user
@ -99,15 +100,17 @@ public class TransportRankEvalAction extends HandledTransportAction<RankEvalRequ
msearchRequest.maxConcurrentSearchRequests(evaluationSpecification.getMaxConcurrentSearches());
List<RatedRequest> ratedRequestsInSearch = new ArrayList<>();
for (RatedRequest ratedRequest : ratedRequests) {
- SearchSourceBuilder ratedSearchSource = ratedRequest.getTestRequest();
- if (ratedSearchSource == null) {
+ SearchSourceBuilder evaluationRequest = ratedRequest.getEvaluationRequest();
+ if (evaluationRequest == null) {
Map<String, Object> params = ratedRequest.getParams();
String templateId = ratedRequest.getTemplateId();
TemplateScript.Factory templateScript = scriptsWithoutParams.get(templateId);
String resolvedRequest = templateScript.newInstance(params).execute();
try (XContentParser subParser = createParser(namedXContentRegistry,
LoggingDeprecationHandler.INSTANCE, new BytesArray(resolvedRequest), XContentType.JSON)) {
- ratedSearchSource = SearchSourceBuilder.fromXContent(subParser, false);
+ evaluationRequest = SearchSourceBuilder.fromXContent(subParser, false);
// check for parts that should not be part of a ranking evaluation request
validateEvaluatedQuery(evaluationRequest);
} catch (IOException e) {
// if we fail parsing, put the exception into the errors map and continue
errors.put(ratedRequest.getId(), e);
@ -116,17 +119,17 @@ public class TransportRankEvalAction extends HandledTransportAction<RankEvalRequ
}
if (metric.forcedSearchSize().isPresent()) {
- ratedSearchSource.size(metric.forcedSearchSize().get());
+ evaluationRequest.size(metric.forcedSearchSize().get());
}
ratedRequestsInSearch.add(ratedRequest);
List<String> summaryFields = ratedRequest.getSummaryFields();
if (summaryFields.isEmpty()) {
- ratedSearchSource.fetchSource(false);
+ evaluationRequest.fetchSource(false);
} else {
- ratedSearchSource.fetchSource(summaryFields.toArray(new String[summaryFields.size()]), new String[0]);
+ evaluationRequest.fetchSource(summaryFields.toArray(new String[summaryFields.size()]), new String[0]);
}
- SearchRequest searchRequest = new SearchRequest(request.indices(), ratedSearchSource);
+ SearchRequest searchRequest = new SearchRequest(request.indices(), evaluationRequest);
searchRequest.indicesOptions(request.indicesOptions());
msearchRequest.add(searchRequest);
}


@ -33,7 +33,11 @@ import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.query.MatchAllQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
import org.elasticsearch.search.suggest.SuggestBuilder;
import org.elasticsearch.search.suggest.SuggestBuilders;
import org.elasticsearch.test.ESTestCase;
import org.junit.AfterClass;
import org.junit.BeforeClass;
@ -165,7 +169,7 @@ public class RatedRequestsTests extends ESTestCase {
private static RatedRequest mutateTestItem(RatedRequest original) {
String id = original.getId();
- SearchSourceBuilder testRequest = original.getTestRequest();
+ SearchSourceBuilder evaluationRequest = original.getEvaluationRequest();
List<RatedDocument> ratedDocs = original.getRatedDocs();
Map<String, Object> params = original.getParams();
List<String> summaryFields = original.getSummaryFields();
@ -177,11 +181,11 @@ public class RatedRequestsTests extends ESTestCase {
id = randomValueOtherThan(id, () -> randomAlphaOfLength(10));
break;
case 1:
- if (testRequest != null) {
- int size = randomValueOtherThan(testRequest.size(), () -> randomInt(Integer.MAX_VALUE));
- testRequest = new SearchSourceBuilder();
- testRequest.size(size);
- testRequest.query(new MatchAllQueryBuilder());
+ if (evaluationRequest != null) {
+ int size = randomValueOtherThan(evaluationRequest.size(), () -> randomInt(Integer.MAX_VALUE));
+ evaluationRequest = new SearchSourceBuilder();
+ evaluationRequest.size(size);
+ evaluationRequest.query(new MatchAllQueryBuilder());
} else {
if (randomBoolean()) {
Map<String, Object> mutated = new HashMap<>();
@ -204,10 +208,10 @@ public class RatedRequestsTests extends ESTestCase {
}
RatedRequest ratedRequest;
- if (testRequest == null) {
+ if (evaluationRequest == null) {
ratedRequest = new RatedRequest(id, ratedDocs, params, templateId);
} else {
- ratedRequest = new RatedRequest(id, ratedDocs, testRequest);
+ ratedRequest = new RatedRequest(id, ratedDocs, evaluationRequest);
}
ratedRequest.addSummaryFields(summaryFields);
@ -258,6 +262,44 @@ public class RatedRequestsTests extends ESTestCase {
expectThrows(IllegalArgumentException.class, () -> new RatedRequest("id", ratedDocs, null, "templateId"));
}
public void testAggsNotAllowed() {
List<RatedDocument> ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1));
SearchSourceBuilder query = new SearchSourceBuilder();
query.aggregation(AggregationBuilders.terms("fieldName"));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new RatedRequest("id", ratedDocs, query));
assertEquals("Query in rated requests should not contain aggregations.", e.getMessage());
}
public void testSuggestionsNotAllowed() {
List<RatedDocument> ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1));
SearchSourceBuilder query = new SearchSourceBuilder();
query.suggest(new SuggestBuilder().addSuggestion("id", SuggestBuilders.completionSuggestion("fieldname")));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new RatedRequest("id", ratedDocs, query));
assertEquals("Query in rated requests should not contain a suggest section.", e.getMessage());
}
public void testHighlighterNotAllowed() {
List<RatedDocument> ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1));
SearchSourceBuilder query = new SearchSourceBuilder();
query.highlighter(new HighlightBuilder().field("field"));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new RatedRequest("id", ratedDocs, query));
assertEquals("Query in rated requests should not contain a highlighter section.", e.getMessage());
}
public void testExplainNotAllowed() {
List<RatedDocument> ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> new RatedRequest("id", ratedDocs, new SearchSourceBuilder().explain(true)));
assertEquals("Query in rated requests should not use explain.", e.getMessage());
}
public void testProfileNotAllowed() {
List<RatedDocument> ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1));
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> new RatedRequest("id", ratedDocs, new SearchSourceBuilder().profile(true)));
assertEquals("Query in rated requests should not use profile.", e.getMessage());
}
/**
* test that modifying the order of index/docId doesn't
* matter for parsing xContent
@ -287,7 +329,7 @@ public class RatedRequestsTests extends ESTestCase {
try (XContentParser parser = createParser(JsonXContent.jsonXContent, querySpecString)) {
RatedRequest specification = RatedRequest.fromXContent(parser);
assertEquals("my_qa_query", specification.getId());
- assertNotNull(specification.getTestRequest());
+ assertNotNull(specification.getEvaluationRequest());
List<RatedDocument> ratedDocs = specification.getRatedDocs();
assertEquals(3, ratedDocs.size());
for (int i = 0; i < 3; i++) {


@ -1,5 +1,12 @@
---
"Response format for search failures":
- do:
indices.create:
index: source
body:
settings:
index.number_of_shards: 2
- do:
index:
index: source
@ -26,7 +33,7 @@
- match: {updated: 0}
- match: {version_conflicts: 0}
- match: {batches: 0}
- - is_true: failures.0.shard
+ - match: {failures.0.shard: 0}
- match: {failures.0.index: source}
- is_true: failures.0.node
- match: {failures.0.reason.type: script_exception}


@ -1,5 +1,12 @@
---
"Response format for search failures":
- do:
indices.create:
index: source
body:
settings:
index.number_of_shards: 2
- do:
index:
index: source
@ -22,7 +29,7 @@
- match: {updated: 0}
- match: {version_conflicts: 0}
- match: {batches: 0}
- - is_true: failures.0.shard
+ - match: {failures.0.shard: 0}
- match: {failures.0.index: source}
- is_true: failures.0.node
- match: {failures.0.reason.type: script_exception}


@ -34,6 +34,7 @@ File repositoryDir = new File(project.buildDir, "shared-repository")
/** A task to start the URLFixture which exposes the repositoryDir over HTTP **/
task urlFixture(type: AntFixture) {
dependsOn testClasses
doFirst {
repositoryDir.mkdirs()
}


@ -161,7 +161,7 @@
search:
index: my_remote_cluster:aliased_test_index,my_remote_cluster:field_caps_index_1
- - match: { _shards.total: 8 }
+ - match: { _shards.total: 4 }
- match: { hits.total: 2 }
- match: { hits.hits.0._source.filter_field: 1 }
- match: { hits.hits.0._index: "my_remote_cluster:test_index" }


@ -27,6 +27,8 @@
indices.create:
index: field_caps_index_1
body:
settings:
index.number_of_shards: 1
mappings:
t:
properties:
@ -51,6 +53,8 @@
indices.create:
index: field_caps_index_3
body:
settings:
index.number_of_shards: 1
mappings:
t:
properties:


@ -30,6 +30,7 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@ -106,6 +107,43 @@ public class SmokeMultipleTemplatesIT extends ESIntegTestCase {
assertEquals(0.9, response.getEvaluationResult(), Double.MIN_VALUE);
}
public void testTemplateWithAggsFails() {
String template = "{ \"aggs\" : { \"avg_grade\" : { \"avg\" : { \"field\" : \"grade\" }}}}";
assertTemplatedRequestFailures(template, "Query in rated requests should not contain aggregations.");
}
public void testTemplateWithSuggestFails() {
String template = "{\"suggest\" : {\"my-suggestion\" : {\"text\" : \"Elastic\",\"term\" : {\"field\" : \"message\"}}}}";
assertTemplatedRequestFailures(template, "Query in rated requests should not contain a suggest section.");
}
public void testTemplateWithHighlighterFails() {
String template = "{\"highlight\" : { \"fields\" : {\"content\" : {}}}}";
assertTemplatedRequestFailures(template, "Query in rated requests should not contain a highlighter section.");
}
public void testTemplateWithProfileFails() {
String template = "{\"profile\" : \"true\" }";
assertTemplatedRequestFailures(template, "Query in rated requests should not use profile.");
}
public void testTemplateWithExplainFails() {
String template = "{\"explain\" : \"true\" }";
assertTemplatedRequestFailures(template, "Query in rated requests should not use explain.");
}
private static void assertTemplatedRequestFailures(String template, String expectedMessage) {
List<RatedDocument> ratedDocs = Arrays.asList(new RatedDocument("index1", "id1", 1));
RatedRequest ratedRequest = new RatedRequest("id", ratedDocs, Collections.singletonMap("param1", "value1"), "templateId");
Collection<ScriptWithId> templates = Collections.singletonList(new ScriptWithId("templateId",
new Script(ScriptType.INLINE, Script.DEFAULT_TEMPLATE_LANG, template, Collections.emptyMap())));
RankEvalSpec rankEvalSpec = new RankEvalSpec(Collections.singletonList(ratedRequest), new PrecisionAtK(), templates);
RankEvalRequest rankEvalRequest = new RankEvalRequest(rankEvalSpec, new String[] { "test" });
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> client().execute(RankEvalAction.INSTANCE, rankEvalRequest).actionGet());
assertEquals(expectedMessage, e.getMessage());
}
private static List<RatedDocument> createRelevant(String... docs) {
List<RatedDocument> relevant = new ArrayList<>();
for (String doc : docs) {


@ -14,6 +14,8 @@
---
"No templates":
- skip:
features: default_shards
- do:
cat.templates: {}
@ -174,6 +176,8 @@
---
"Sort templates":
- skip:
features: default_shards
- do:
indices.put_template:
name: test
@ -222,6 +226,8 @@
---
"Multiple template":
- skip:
features: default_shards
- do:
indices.put_template:
name: test_1


@ -1,8 +1,8 @@
---
"Shrink index via API":
- skip:
- version: " - 6.99.99"
- reason: expects warnings that pre-7.0.0 will not send
+ version: " - 6.3.99"
+ reason: expects warnings that pre-6.4.0 will not send
features: "warnings"
# creates an index with one document solely allocated on the master node
# and shrinks it into a new index with a single shard
@ -24,7 +24,8 @@
settings: settings:
# ensure everything is allocated on a single node # ensure everything is allocated on a single node
index.routing.allocation.include._id: $master index.routing.allocation.include._id: $master
number_of_replicas: 0 index.number_of_shards: 2
index.number_of_replicas: 0
- do: - do:
index: index:
index: source index: source

View File

@ -1,8 +1,8 @@
--- ---
"Shrink index ignores target template mapping": "Shrink index ignores target template mapping":
- skip: - skip:
version: " - 6.99.99" version: " - 6.3.99"
reason: expects warnings that pre-7.0.0 will not send reason: expects warnings that pre-6.4.0 will not send
features: "warnings" features: "warnings"
- do: - do:
@ -20,7 +20,8 @@
settings: settings:
# ensure everything is allocated on a single node # ensure everything is allocated on a single node
index.routing.allocation.include._id: $master index.routing.allocation.include._id: $master
number_of_replicas: 0 index.number_of_shards: 2
index.number_of_replicas: 0
mappings: mappings:
test: test:
properties: properties:

View File

@ -1,8 +1,8 @@
--- ---
"Copy settings during shrink index": "Copy settings during shrink index":
- skip: - skip:
version: " - 6.99.99" version: " - 6.3.99"
reason: expects warnings that pre-7.0.0 will not send reason: expects warnings that pre-6.4.0 will not send
features: "warnings" features: "warnings"
- do: - do:
@ -19,6 +19,7 @@
settings: settings:
# ensure everything is allocated on the master node # ensure everything is allocated on the master node
index.routing.allocation.include._id: $master index.routing.allocation.include._id: $master
index.number_of_shards: 2
index.number_of_replicas: 0 index.number_of_replicas: 0
index.merge.scheduler.max_merge_count: 4 index.merge.scheduler.max_merge_count: 4

View File

@ -33,8 +33,8 @@ setup:
--- ---
"Split index via API": "Split index via API":
- skip: - skip:
version: " - 6.99.99" version: " - 6.3.99"
reason: expects warnings that pre-7.0.0 will not send reason: expects warnings that pre-6.4.0 will not send
features: "warnings" features: "warnings"
# make it read-only # make it read-only
@ -110,8 +110,8 @@ setup:
# when re-enabling uncomment the below skips # when re-enabling uncomment the below skips
version: "all" version: "all"
reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503" reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503"
# version: " - 6.99.99" # version: " - 6.3.99"
# reason: expects warnings that pre-7.0.0 will not send # reason: expects warnings that pre-6.4.0 will not send
features: "warnings" features: "warnings"
- do: - do:
indices.create: indices.create:
@ -213,8 +213,8 @@ setup:
--- ---
"Create illegal split indices": "Create illegal split indices":
- skip: - skip:
version: " - 6.99.99" version: " - 6.3.99"
reason: expects warnings that pre-7.0.0 will not send reason: expects warnings that pre-6.4.0 will not send
features: "warnings" features: "warnings"
# try to do an illegal split with number_of_routing_shards set # try to do an illegal split with number_of_routing_shards set

View File

@ -4,8 +4,8 @@
# when re-enabling uncomment the below skips # when re-enabling uncomment the below skips
version: "all" version: "all"
reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503" reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503"
# version: " - 6.99.99" # version: " - 6.3.99"
# reason: expects warnings that pre-7.0.0 will not send # reason: expects warnings that pre-6.4.0 will not send
features: "warnings" features: "warnings"
# create index # create index

View File

@ -1,8 +1,8 @@
--- ---
"Copy settings during split index": "Copy settings during split index":
- skip: - skip:
version: " - 6.99.99" version: " - 6.3.99"
reason: expects warnings that pre-7.0.0 will not send reason: expects warnings that pre-6.4.0 will not send
features: "warnings" features: "warnings"
- do: - do:

View File

@ -17,14 +17,14 @@ setup:
index: test index: test
type: doc type: doc
id: 1 id: 1
body: { "date": "2014-03-03T00:00:00", "keyword": "foo" } body: { "date": "2014-03-03T00:00:00", "keyword": "dgx" }
- do: - do:
index: index:
index: test index: test
type: doc type: doc
id: 2 id: 2
body: { "date": "2015-03-03T00:00:00", "keyword": "bar" } body: { "date": "2015-03-03T00:00:00", "keyword": "dfs" }
- do: - do:
index: index:
@ -38,7 +38,36 @@ setup:
index: test index: test
type: doc type: doc
id: 4 id: 4
body: { "date": "2017-03-03T00:00:00" } body: { "date": "2017-03-03T00:00:00", "keyword": "foo" }
- do:
index:
index: test
type: doc
id: 5
body: { "date": "2018-03-03T00:00:00", "keyword": "bar" }
- do:
index:
index: test
type: doc
id: 6
body: { "date": "2019-03-03T00:00:00", "keyword": "baz" }
- do:
index:
index: test
type: doc
id: 7
body: { "date": "2020-03-03T00:00:00", "keyword": "qux" }
- do:
index:
index: test
type: doc
id: 8
body: { "date": "2021-03-03T00:00:00", "keyword": "quux" }
- do: - do:
indices.refresh: indices.refresh:

View File

@ -253,7 +253,6 @@ import org.elasticsearch.rest.action.admin.indices.RestForceMergeAction;
import org.elasticsearch.rest.action.admin.indices.RestGetAliasesAction; import org.elasticsearch.rest.action.admin.indices.RestGetAliasesAction;
import org.elasticsearch.rest.action.admin.indices.RestGetAllAliasesAction; import org.elasticsearch.rest.action.admin.indices.RestGetAllAliasesAction;
import org.elasticsearch.rest.action.admin.indices.RestGetAllMappingsAction; import org.elasticsearch.rest.action.admin.indices.RestGetAllMappingsAction;
import org.elasticsearch.rest.action.admin.indices.RestGetAllSettingsAction;
import org.elasticsearch.rest.action.admin.indices.RestGetFieldMappingAction; import org.elasticsearch.rest.action.admin.indices.RestGetFieldMappingAction;
import org.elasticsearch.rest.action.admin.indices.RestGetIndexTemplateAction; import org.elasticsearch.rest.action.admin.indices.RestGetIndexTemplateAction;
import org.elasticsearch.rest.action.admin.indices.RestGetIndicesAction; import org.elasticsearch.rest.action.admin.indices.RestGetIndicesAction;
@ -558,7 +557,6 @@ public class ActionModule extends AbstractModule {
registerHandler.accept(new RestGetAllAliasesAction(settings, restController)); registerHandler.accept(new RestGetAllAliasesAction(settings, restController));
registerHandler.accept(new RestGetAllMappingsAction(settings, restController)); registerHandler.accept(new RestGetAllMappingsAction(settings, restController));
registerHandler.accept(new RestGetAllSettingsAction(settings, restController, indexScopedSettings, settingsFilter));
registerHandler.accept(new RestGetIndicesAction(settings, restController, indexScopedSettings, settingsFilter)); registerHandler.accept(new RestGetIndicesAction(settings, restController, indexScopedSettings, settingsFilter));
registerHandler.accept(new RestIndicesStatsAction(settings, restController)); registerHandler.accept(new RestIndicesStatsAction(settings, restController));
registerHandler.accept(new RestIndicesSegmentsAction(settings, restController)); registerHandler.accept(new RestIndicesSegmentsAction(settings, restController));

View File

@ -101,8 +101,6 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
} }
if (in.getVersion().before(Version.V_6_4_0)) { if (in.getVersion().before(Version.V_6_4_0)) {
copySettings = null; copySettings = null;
} else if (in.getVersion().onOrAfter(Version.V_6_4_0) && in.getVersion().before(Version.V_7_0_0_alpha1)){
copySettings = in.readBoolean();
} else { } else {
copySettings = in.readOptionalBoolean(); copySettings = in.readOptionalBoolean();
} }
@ -116,10 +114,9 @@ public class ResizeRequest extends AcknowledgedRequest<ResizeRequest> implements
if (out.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)) { if (out.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)) {
out.writeEnum(type); out.writeEnum(type);
} }
// noinspection StatementWithEmptyBody
if (out.getVersion().before(Version.V_6_4_0)) { if (out.getVersion().before(Version.V_6_4_0)) {
} else if (out.getVersion().onOrAfter(Version.V_6_4_0) && out.getVersion().before(Version.V_7_0_0_alpha1)) {
out.writeBoolean(copySettings == null ? false : copySettings);
} else { } else {
out.writeOptionalBoolean(copySettings); out.writeOptionalBoolean(copySettings);
} }

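The ResizeRequest hunk above collapses a three-way version branch into two: peers older than 6.4.0 get nothing on the wire, and everyone else gets an optional boolean. A minimal sketch of this version-gated serialization pattern, using plain DataOutputStream instead of Elasticsearch's StreamOutput (the version constant and names are illustrative):

import java.io.DataOutputStream;
import java.io.IOException;

final class VersionedWriter {

    static final int V_6_4_0 = 6_04_00; // illustrative version id

    // writes a nullable Boolean only to peers new enough to understand the field
    static void writeOptionalCopySettings(DataOutputStream out, int peerVersion, Boolean copySettings)
            throws IOException {
        if (peerVersion < V_6_4_0) {
            return; // old peer: the field did not exist yet, so write nothing
        }
        // "optional boolean" encoding: a presence flag, then the value if present
        out.writeBoolean(copySettings != null);
        if (copySettings != null) {
            out.writeBoolean(copySettings);
        }
    }
}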
View File

@ -366,8 +366,14 @@ public class MetaDataCreateIndexService extends AbstractComponent {
} }
// now, put the request settings, so they override templates // now, put the request settings, so they override templates
indexSettingsBuilder.put(request.settings()); indexSettingsBuilder.put(request.settings());
if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) {
DiscoveryNodes nodes = currentState.nodes();
final Version createdVersion = Version.min(Version.CURRENT, nodes.getSmallestNonClientNodeVersion());
indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion);
}
if (indexSettingsBuilder.get(SETTING_NUMBER_OF_SHARDS) == null) { if (indexSettingsBuilder.get(SETTING_NUMBER_OF_SHARDS) == null) {
indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 5)); final int numberOfShards = getNumberOfShards(indexSettingsBuilder);
indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, numberOfShards));
} }
if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) { if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) {
indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1)); indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1));
@ -376,12 +382,6 @@ public class MetaDataCreateIndexService extends AbstractComponent {
indexSettingsBuilder.put(SETTING_AUTO_EXPAND_REPLICAS, settings.get(SETTING_AUTO_EXPAND_REPLICAS)); indexSettingsBuilder.put(SETTING_AUTO_EXPAND_REPLICAS, settings.get(SETTING_AUTO_EXPAND_REPLICAS));
} }
if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) {
DiscoveryNodes nodes = currentState.nodes();
final Version createdVersion = Version.min(Version.CURRENT, nodes.getSmallestNonClientNodeVersion());
indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion);
}
if (indexSettingsBuilder.get(SETTING_CREATION_DATE) == null) { if (indexSettingsBuilder.get(SETTING_CREATION_DATE) == null) {
indexSettingsBuilder.put(SETTING_CREATION_DATE, new DateTime(DateTimeZone.UTC).getMillis()); indexSettingsBuilder.put(SETTING_CREATION_DATE, new DateTime(DateTimeZone.UTC).getMillis());
} }
@ -573,6 +573,18 @@ public class MetaDataCreateIndexService extends AbstractComponent {
} }
} }
static int getNumberOfShards(final Settings.Builder indexSettingsBuilder) {
// TODO: this logic can be removed when the current major version is 8
assert Version.CURRENT.major == 7;
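// note: assumes SETTING_VERSION_CREATED was already put on the builder; the caller sets it just above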
final int numberOfShards;
if (Version.fromId(Integer.parseInt(indexSettingsBuilder.get(SETTING_VERSION_CREATED))).before(Version.V_7_0_0_alpha1)) {
numberOfShards = 5;
} else {
numberOfShards = 1;
}
return numberOfShards;
}
@Override @Override
public void onFailure(String source, Exception e) { public void onFailure(String source, Exception e) {
if (e instanceof ResourceAlreadyExistsException) { if (e instanceof ResourceAlreadyExistsException) {

View File

@ -114,11 +114,24 @@ public class AllocationService extends AbstractComponent {
} }
protected ClusterState buildResultAndLogHealthChange(ClusterState oldState, RoutingAllocation allocation, String reason) { protected ClusterState buildResultAndLogHealthChange(ClusterState oldState, RoutingAllocation allocation, String reason) {
RoutingTable oldRoutingTable = oldState.routingTable(); ClusterState newState = buildResult(oldState, allocation);
RoutingNodes newRoutingNodes = allocation.routingNodes();
logClusterHealthStateChange(
new ClusterStateHealth(oldState),
new ClusterStateHealth(newState),
reason
);
return newState;
}
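/** Applies the allocation result to the old cluster state without logging; buildResultAndLogHealthChange layers the health logging on top. */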
private ClusterState buildResult(ClusterState oldState, RoutingAllocation allocation) {
final RoutingTable oldRoutingTable = oldState.routingTable();
final RoutingNodes newRoutingNodes = allocation.routingNodes();
final RoutingTable newRoutingTable = new RoutingTable.Builder().updateNodes(oldRoutingTable.version(), newRoutingNodes).build(); final RoutingTable newRoutingTable = new RoutingTable.Builder().updateNodes(oldRoutingTable.version(), newRoutingNodes).build();
MetaData newMetaData = allocation.updateMetaDataWithRoutingChanges(newRoutingTable); final MetaData newMetaData = allocation.updateMetaDataWithRoutingChanges(newRoutingTable);
assert newRoutingTable.validate(newMetaData); // validates the routing table is coherent with the cluster state metadata assert newRoutingTable.validate(newMetaData); // validates the routing table is coherent with the cluster state metadata
final ClusterState.Builder newStateBuilder = ClusterState.builder(oldState) final ClusterState.Builder newStateBuilder = ClusterState.builder(oldState)
.routingTable(newRoutingTable) .routingTable(newRoutingTable)
.metaData(newMetaData); .metaData(newMetaData);
@ -131,13 +144,7 @@ public class AllocationService extends AbstractComponent {
newStateBuilder.customs(customsBuilder.build()); newStateBuilder.customs(customsBuilder.build());
} }
} }
final ClusterState newState = newStateBuilder.build(); return newStateBuilder.build();
logClusterHealthStateChange(
new ClusterStateHealth(oldState),
new ClusterStateHealth(newState),
reason
);
return newState;
} }
// Used for testing // Used for testing
@ -209,24 +216,23 @@ public class AllocationService extends AbstractComponent {
* if needed. * if needed.
*/ */
public ClusterState deassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) { public ClusterState deassociateDeadNodes(ClusterState clusterState, boolean reroute, String reason) {
ClusterState fixedClusterState = adaptAutoExpandReplicas(clusterState); RoutingNodes routingNodes = getMutableRoutingNodes(clusterState);
RoutingNodes routingNodes = getMutableRoutingNodes(fixedClusterState);
// shuffle the unassigned nodes, just so we won't have things like poison failed shards // shuffle the unassigned nodes, just so we won't have things like poison failed shards
routingNodes.unassigned().shuffle(); routingNodes.unassigned().shuffle();
RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, fixedClusterState, RoutingAllocation allocation = new RoutingAllocation(allocationDeciders, routingNodes, clusterState,
clusterInfoService.getClusterInfo(), currentNanoTime()); clusterInfoService.getClusterInfo(), currentNanoTime());
// first, clear from the shards any node id they used to belong to that is now dead // first, clear from the shards any node id they used to belong to that is now dead
deassociateDeadNodes(allocation); deassociateDeadNodes(allocation);
if (reroute) { if (allocation.routingNodesChanged()) {
reroute(allocation); clusterState = buildResult(clusterState, allocation);
} }
if (reroute) {
if (fixedClusterState == clusterState && allocation.routingNodesChanged() == false) { return reroute(clusterState, reason);
} else {
return clusterState; return clusterState;
} }
return buildResultAndLogHealthChange(clusterState, allocation, reason);
} }
/** /**

View File

@ -158,6 +158,7 @@ public class KeyStoreWrapper implements SecureSettings {
/** The decrypted secret data. See {@link #decrypt(char[])}. */ /** The decrypted secret data. See {@link #decrypt(char[])}. */
private final SetOnce<Map<String, Entry>> entries = new SetOnce<>(); private final SetOnce<Map<String, Entry>> entries = new SetOnce<>();
private volatile boolean closed;
private KeyStoreWrapper(int formatVersion, boolean hasPassword, byte[] dataBytes) { private KeyStoreWrapper(int formatVersion, boolean hasPassword, byte[] dataBytes) {
this.formatVersion = formatVersion; this.formatVersion = formatVersion;
@ -448,8 +449,8 @@ public class KeyStoreWrapper implements SecureSettings {
} }
/** Write the keystore to the given config directory. */ /** Write the keystore to the given config directory. */
public void save(Path configDir, char[] password) throws Exception { public synchronized void save(Path configDir, char[] password) throws Exception {
assert isLoaded(); ensureOpen();
SimpleFSDirectory directory = new SimpleFSDirectory(configDir); SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
// write to tmp file first, then overwrite // write to tmp file first, then overwrite
@ -500,16 +501,22 @@ public class KeyStoreWrapper implements SecureSettings {
} }
} }
/**
* It is possible to retrieve the setting names even if the keystore is closed.
* This allows {@link SecureSetting} to correctly determine that an entry exists even though it cannot be read. Thus attempting to
* read a secure setting after the keystore is closed will generate a "keystore is closed" exception rather than using the fallback
* setting.
*/
@Override @Override
public Set<String> getSettingNames() { public Set<String> getSettingNames() {
assert isLoaded(); assert entries.get() != null : "Keystore is not loaded";
return entries.get().keySet(); return entries.get().keySet();
} }
// TODO: make settings accessible only to code that registered the setting // TODO: make settings accessible only to code that registered the setting
@Override @Override
public SecureString getString(String setting) { public synchronized SecureString getString(String setting) {
assert isLoaded(); ensureOpen();
Entry entry = entries.get().get(setting); Entry entry = entries.get().get(setting);
if (entry == null || entry.type != EntryType.STRING) { if (entry == null || entry.type != EntryType.STRING) {
throw new IllegalArgumentException("Secret setting " + setting + " is not a string"); throw new IllegalArgumentException("Secret setting " + setting + " is not a string");
@ -520,13 +527,12 @@ public class KeyStoreWrapper implements SecureSettings {
} }
@Override @Override
public InputStream getFile(String setting) { public synchronized InputStream getFile(String setting) {
assert isLoaded(); ensureOpen();
Entry entry = entries.get().get(setting); Entry entry = entries.get().get(setting);
if (entry == null || entry.type != EntryType.FILE) { if (entry == null || entry.type != EntryType.FILE) {
throw new IllegalArgumentException("Secret setting " + setting + " is not a file"); throw new IllegalArgumentException("Secret setting " + setting + " is not a file");
} }
return new ByteArrayInputStream(entry.bytes); return new ByteArrayInputStream(entry.bytes);
} }
@ -543,8 +549,8 @@ public class KeyStoreWrapper implements SecureSettings {
} }
/** Set a string setting. */ /** Set a string setting. */
void setString(String setting, char[] value) { synchronized void setString(String setting, char[] value) {
assert isLoaded(); ensureOpen();
validateSettingName(setting); validateSettingName(setting);
ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(CharBuffer.wrap(value)); ByteBuffer byteBuffer = StandardCharsets.UTF_8.encode(CharBuffer.wrap(value));
@ -556,8 +562,8 @@ public class KeyStoreWrapper implements SecureSettings {
} }
/** Set a file setting. */ /** Set a file setting. */
void setFile(String setting, byte[] bytes) { synchronized void setFile(String setting, byte[] bytes) {
assert isLoaded(); ensureOpen();
validateSettingName(setting); validateSettingName(setting);
Entry oldEntry = entries.get().put(setting, new Entry(EntryType.FILE, Arrays.copyOf(bytes, bytes.length))); Entry oldEntry = entries.get().put(setting, new Entry(EntryType.FILE, Arrays.copyOf(bytes, bytes.length)));
@ -568,15 +574,23 @@ public class KeyStoreWrapper implements SecureSettings {
/** Remove the given setting from the keystore. */ /** Remove the given setting from the keystore. */
void remove(String setting) { void remove(String setting) {
assert isLoaded(); ensureOpen();
Entry oldEntry = entries.get().remove(setting); Entry oldEntry = entries.get().remove(setting);
if (oldEntry != null) { if (oldEntry != null) {
Arrays.fill(oldEntry.bytes, (byte)0); Arrays.fill(oldEntry.bytes, (byte)0);
} }
} }
private void ensureOpen() {
if (closed) {
throw new IllegalStateException("Keystore is closed");
}
assert isLoaded() : "Keystore is not loaded";
}
@Override @Override
public void close() { public synchronized void close() {
this.closed = true;
for (Entry entry : entries.get().values()) { for (Entry entry : entries.get().values()) {
Arrays.fill(entry.bytes, (byte)0); Arrays.fill(entry.bytes, (byte)0);
} }

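The KeyStoreWrapper changes above follow a common lifecycle pattern: a volatile closed flag, synchronized accessors, and an ensureOpen() gate, while the name set deliberately stays readable after close. A compact sketch of the same pattern in plain Java (a hypothetical class, not the Elasticsearch one):

import java.util.Arrays;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

final class ClosableSecretStore implements AutoCloseable {

    private final Map<String, char[]> entries = new ConcurrentHashMap<>();
    private volatile boolean closed;

    // names stay readable after close, so callers can tell "exists but closed"
    // apart from "does not exist, fall back to the default"
    Set<String> names() {
        return entries.keySet();
    }

    synchronized char[] get(String name) {
        ensureOpen();
        return entries.get(name);
    }

    synchronized void put(String name, char[] value) {
        ensureOpen();
        entries.put(name, value);
    }

    private void ensureOpen() {
        if (closed) {
            throw new IllegalStateException("store is closed");
        }
    }

    @Override
    public synchronized void close() {
        closed = true;
        for (char[] value : entries.values()) {
            Arrays.fill(value, '\0'); // zero out secrets on close
        }
    }
}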
View File

@ -380,7 +380,7 @@ public class NodeJoinController extends AbstractComponent {
/** /**
* a task indicating that the current node should become master, if no current master is known * a task indicating that the current node should become master, if no current master is known
*/ */
private static final DiscoveryNode BECOME_MASTER_TASK = new DiscoveryNode("_BECOME_MASTER_TASK_", public static final DiscoveryNode BECOME_MASTER_TASK = new DiscoveryNode("_BECOME_MASTER_TASK_",
new TransportAddress(TransportAddress.META_ADDRESS, 0), new TransportAddress(TransportAddress.META_ADDRESS, 0),
Collections.emptyMap(), Collections.emptySet(), Version.CURRENT) { Collections.emptyMap(), Collections.emptySet(), Version.CURRENT) {
@Override @Override
@ -393,7 +393,7 @@ public class NodeJoinController extends AbstractComponent {
* a task that is used to signal that the election is stopped and we should process pending joins. * a task that is used to signal that the election is stopped and we should process pending joins.
* it may be used in combination with {@link #BECOME_MASTER_TASK} * it may be used in combination with {@link #BECOME_MASTER_TASK}
*/ */
private static final DiscoveryNode FINISH_ELECTION_TASK = new DiscoveryNode("_FINISH_ELECTION_", public static final DiscoveryNode FINISH_ELECTION_TASK = new DiscoveryNode("_FINISH_ELECTION_",
new TransportAddress(TransportAddress.META_ADDRESS, 0), Collections.emptyMap(), Collections.emptySet(), Version.CURRENT) { new TransportAddress(TransportAddress.META_ADDRESS, 0), Collections.emptyMap(), Collections.emptySet(), Version.CURRENT) {
@Override @Override
public String toString() { public String toString() {

View File

@ -1,121 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.rest.action.admin.indices;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest;
import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature;
import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.cluster.metadata.AliasMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.xcontent.ToXContent.Params;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
import org.elasticsearch.rest.action.RestBuilderListener;
import java.io.IOException;
import java.util.List;
import java.util.Set;
import static org.elasticsearch.rest.RestRequest.Method.GET;
import static org.elasticsearch.rest.RestRequest.Method.HEAD;
import static org.elasticsearch.rest.RestStatus.OK;
/**
* The REST handler for retrieving all settings
*/
public class RestGetAllSettingsAction extends BaseRestHandler {
private final IndexScopedSettings indexScopedSettings;
private final SettingsFilter settingsFilter;
public RestGetAllSettingsAction(final Settings settings, final RestController controller,
final IndexScopedSettings indexScopedSettings, final SettingsFilter settingsFilter) {
super(settings);
this.indexScopedSettings = indexScopedSettings;
controller.registerHandler(GET, "/_settings", this);
this.settingsFilter = settingsFilter;
}
@Override
public String getName() {
return "get_all_settings_action";
}
@Override
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
final GetIndexRequest getIndexRequest = new GetIndexRequest();
getIndexRequest.indices(Strings.EMPTY_ARRAY);
getIndexRequest.features(Feature.SETTINGS);
getIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, getIndexRequest.indicesOptions()));
getIndexRequest.local(request.paramAsBoolean("local", getIndexRequest.local()));
getIndexRequest.humanReadable(request.paramAsBoolean("human", false));
// This is required so the "flat_settings" parameter counts as consumed
request.paramAsBoolean("flat_settings", false);
final boolean defaults = request.paramAsBoolean("include_defaults", false);
return channel -> client.admin().indices().getIndex(getIndexRequest, new RestBuilderListener<GetIndexResponse>(channel) {
@Override
public RestResponse buildResponse(final GetIndexResponse response, final XContentBuilder builder) throws Exception {
builder.startObject();
{
for (final String index : response.indices()) {
builder.startObject(index);
{
writeSettings(response.settings().get(index), builder, request, defaults);
}
builder.endObject();
}
}
builder.endObject();
return new BytesRestResponse(OK, builder);
}
private void writeSettings(final Settings settings, final XContentBuilder builder,
final Params params, final boolean defaults) throws IOException {
builder.startObject("settings");
{
settings.toXContent(builder, params);
}
builder.endObject();
if (defaults) {
builder.startObject("defaults");
{
settingsFilter
.filter(indexScopedSettings.diff(settings, RestGetAllSettingsAction.this.settings))
.toXContent(builder, request);
}
builder.endObject();
}
}
});
}
}

View File

@ -19,16 +19,12 @@
package org.elasticsearch.rest.action.admin.indices; package org.elasticsearch.rest.action.admin.indices;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest;
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Strings; import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.BytesRestResponse; import org.elasticsearch.rest.BytesRestResponse;
@ -46,6 +42,7 @@ public class RestGetSettingsAction extends BaseRestHandler {
public RestGetSettingsAction(Settings settings, RestController controller) { public RestGetSettingsAction(Settings settings, RestController controller) {
super(settings); super(settings);
controller.registerHandler(GET, "/_settings", this);
controller.registerHandler(GET, "/_settings/{name}", this); controller.registerHandler(GET, "/_settings/{name}", this);
controller.registerHandler(GET, "/{index}/_settings", this); controller.registerHandler(GET, "/{index}/_settings", this);
controller.registerHandler(GET, "/{index}/_settings/{name}", this); controller.registerHandler(GET, "/{index}/_settings/{name}", this);

View File

@ -29,6 +29,8 @@ import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentParser;
@ -46,6 +48,11 @@ import java.util.Map;
*/ */
public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXContentFragment { public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXContentFragment {
/**
* Standard deprecation logger, used to deprecate the allowance of empty templates.
*/
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ScriptMetaData.class));
/** /**
* A builder used to modify the currently stored scripts data held within * A builder used to modify the currently stored scripts data held within
* the {@link ClusterState}. Scripts can be added or deleted, then built * the {@link ClusterState}. Scripts can be added or deleted, then built
@ -161,8 +168,8 @@ public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXCont
* *
* {@code * {@code
* { * {
* "<id>" : "<{@link StoredScriptSource#fromXContent(XContentParser)}>", * "<id>" : "<{@link StoredScriptSource#fromXContent(XContentParser, boolean)}>",
* "<id>" : "<{@link StoredScriptSource#fromXContent(XContentParser)}>", * "<id>" : "<{@link StoredScriptSource#fromXContent(XContentParser, boolean)}>",
* ... * ...
* } * }
* } * }
@ -209,6 +216,14 @@ public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXCont
lang = id.substring(0, split); lang = id.substring(0, split);
id = id.substring(split + 1); id = id.substring(split + 1);
source = new StoredScriptSource(lang, parser.text(), Collections.emptyMap()); source = new StoredScriptSource(lang, parser.text(), Collections.emptyMap());
if (source.getSource().isEmpty()) {
if (source.getLang().equals(Script.DEFAULT_TEMPLATE_LANG)) {
DEPRECATION_LOGGER.deprecated("empty templates should no longer be used");
} else {
DEPRECATION_LOGGER.deprecated("empty scripts should no longer be used");
}
}
} }
exists = scripts.get(id); exists = scripts.get(id);
@ -231,7 +246,7 @@ public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXCont
} }
exists = scripts.get(id); exists = scripts.get(id);
source = StoredScriptSource.fromXContent(parser); source = StoredScriptSource.fromXContent(parser, true);
if (exists == null) { if (exists == null) {
scripts.put(id, source); scripts.put(id, source);

View File

@ -32,6 +32,8 @@ import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ObjectParser;
@ -57,6 +59,11 @@ import java.util.Objects;
*/ */
public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> implements Writeable, ToXContentObject { public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> implements Writeable, ToXContentObject {
/**
* Standard deprecation logger, used to deprecate the allowance of empty templates.
*/
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(StoredScriptSource.class));
/** /**
* Standard {@link ParseField} for outer level of stored script source. * Standard {@link ParseField} for outer level of stored script source.
*/ */
@ -109,7 +116,7 @@ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> imp
private void setSource(XContentParser parser) { private void setSource(XContentParser parser) {
try { try {
if (parser.currentToken() == Token.START_OBJECT) { if (parser.currentToken() == Token.START_OBJECT) {
//this is really for search templates, that need to be converted to json format // this is really for search templates, that need to be converted to json format
XContentBuilder builder = XContentFactory.jsonBuilder(); XContentBuilder builder = XContentFactory.jsonBuilder();
source = Strings.toString(builder.copyCurrentStructure(parser)); source = Strings.toString(builder.copyCurrentStructure(parser));
options.put(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()); options.put(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType());
@ -131,8 +138,12 @@ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> imp
/** /**
* Validates the parameters and creates a {@link StoredScriptSource}. * Validates the parameters and creates a {@link StoredScriptSource}.
*
* @param ignoreEmpty Specify as {@code true} to ignore the empty source check.
* This allows empty templates to be loaded for backwards compatibility.
*/ */
private StoredScriptSource build() { private StoredScriptSource build(boolean ignoreEmpty) {
if (lang == null) { if (lang == null) {
throw new IllegalArgumentException("must specify lang for stored script"); throw new IllegalArgumentException("must specify lang for stored script");
} else if (lang.isEmpty()) { } else if (lang.isEmpty()) {
@ -140,9 +151,25 @@ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> imp
} }
if (source == null) { if (source == null) {
throw new IllegalArgumentException("must specify source for stored script"); if (ignoreEmpty || Script.DEFAULT_TEMPLATE_LANG.equals(lang)) {
if (Script.DEFAULT_TEMPLATE_LANG.equals(lang)) {
DEPRECATION_LOGGER.deprecated("empty templates should no longer be used");
} else {
DEPRECATION_LOGGER.deprecated("empty scripts should no longer be used");
}
} else {
throw new IllegalArgumentException("must specify source for stored script");
}
} else if (source.isEmpty()) { } else if (source.isEmpty()) {
throw new IllegalArgumentException("source cannot be empty"); if (ignoreEmpty || Script.DEFAULT_TEMPLATE_LANG.equals(lang)) {
if (Script.DEFAULT_TEMPLATE_LANG.equals(lang)) {
DEPRECATION_LOGGER.deprecated("empty templates should no longer be used");
} else {
DEPRECATION_LOGGER.deprecated("empty scripts should no longer be used");
}
} else {
throw new IllegalArgumentException("source cannot be empty");
}
} }
if (options.size() > 1 || options.size() == 1 && options.get(Script.CONTENT_TYPE_OPTION) == null) { if (options.size() > 1 || options.size() == 1 && options.get(Script.CONTENT_TYPE_OPTION) == null) {
@ -257,6 +284,8 @@ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> imp
token = parser.nextToken(); token = parser.nextToken();
if (token == Token.END_OBJECT) { if (token == Token.END_OBJECT) {
DEPRECATION_LOGGER.deprecated("empty templates should no longer be used");
return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, "", Collections.emptyMap()); return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, "", Collections.emptyMap());
} }
@ -271,7 +300,7 @@ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> imp
token = parser.nextToken(); token = parser.nextToken();
if (token == Token.START_OBJECT) { if (token == Token.START_OBJECT) {
return PARSER.apply(parser, null).build(); return PARSER.apply(parser, null).build(false);
} else { } else {
throw new ParsingException(parser.getTokenLocation(), "unexpected token [" + token + "], expected [{, <source>]"); throw new ParsingException(parser.getTokenLocation(), "unexpected token [" + token + "], expected [{, <source>]");
} }
@ -280,7 +309,13 @@ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> imp
token = parser.nextToken(); token = parser.nextToken();
if (token == Token.VALUE_STRING) { if (token == Token.VALUE_STRING) {
return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, parser.text(), Collections.emptyMap()); String source = parser.text();
if (source == null || source.isEmpty()) {
DEPRECATION_LOGGER.deprecated("empty templates should no longer be used");
}
return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, source, Collections.emptyMap());
} }
} }
@ -293,7 +328,13 @@ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> imp
builder.copyCurrentStructure(parser); builder.copyCurrentStructure(parser);
} }
return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, Strings.toString(builder), Collections.emptyMap()); String source = Strings.toString(builder);
if (source == null || source.isEmpty()) {
DEPRECATION_LOGGER.deprecated("empty templates should no longer be used");
}
return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, source, Collections.emptyMap());
} }
} }
} catch (IOException ioe) { } catch (IOException ioe) {
@ -320,9 +361,12 @@ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> imp
* *
* Note that the "source" parameter can also handle template parsing including from * Note that the "source" parameter can also handle template parsing including from
* a complex JSON object. * a complex JSON object.
*
* @param ignoreEmpty Specify as {@code true} to ignore the empty source check.
* This allows empty templates to be loaded for backwards compatibility.
*/ */
public static StoredScriptSource fromXContent(XContentParser parser) { public static StoredScriptSource fromXContent(XContentParser parser, boolean ignoreEmpty) {
return PARSER.apply(parser, null).build(); return PARSER.apply(parser, null).build(ignoreEmpty);
} }
/** /**

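The build(boolean ignoreEmpty) change above, mirrored by the parse-time checks in ScriptMetaData, trades a hard failure for a deprecation warning when an empty source must stay legal for backwards compatibility. A stripped-down sketch of that validation shape, with java.util.logging standing in for Elasticsearch's deprecation logger (hypothetical names, and the separate null/empty messages collapsed into one):

import java.util.logging.Logger;

final class ScriptSourceValidator {

    private static final Logger DEPRECATION = Logger.getLogger("deprecation");

    // empty sources are deprecated for templates (and tolerated when asked to),
    // but stay a hard error for ordinary scripts
    static void checkSource(String source, boolean isTemplate, boolean ignoreEmpty) {
        if (source != null && source.isEmpty() == false) {
            return; // a non-empty source is always fine
        }
        if (ignoreEmpty || isTemplate) {
            DEPRECATION.warning(isTemplate
                    ? "empty templates should no longer be used"
                    : "empty scripts should no longer be used");
        } else {
            throw new IllegalArgumentException("must specify source for stored script");
        }
    }
}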
View File

@ -96,7 +96,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {
logger.info("--> creating an index with no replicas"); logger.info("--> creating an index with no replicas");
client().admin().indices().prepareCreate("test") client().admin().indices().prepareCreate("test")
.setSettings(Settings.builder().put("index.number_of_replicas", 0)) .setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0))
.execute().actionGet(); .execute().actionGet();
ensureGreen(); ensureGreen();

View File

@ -18,8 +18,36 @@
*/ */
package org.elasticsearch.cluster.metadata; package org.elasticsearch.cluster.metadata;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.indices.cluster.ClusterStateChanges;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.hamcrest.Matchers.everyItem;
import static org.hamcrest.Matchers.isIn;
public class AutoExpandReplicasTests extends ESTestCase { public class AutoExpandReplicasTests extends ESTestCase {
@ -72,4 +100,104 @@ public class AutoExpandReplicasTests extends ESTestCase {
} }
} }
private static final AtomicInteger nodeIdGenerator = new AtomicInteger();
protected DiscoveryNode createNode(DiscoveryNode.Role... mustHaveRoles) {
Set<DiscoveryNode.Role> roles = new HashSet<>(randomSubsetOf(Sets.newHashSet(DiscoveryNode.Role.values())));
for (DiscoveryNode.Role mustHaveRole : mustHaveRoles) {
roles.add(mustHaveRole);
}
final String id = String.format(Locale.ROOT, "node_%03d", nodeIdGenerator.incrementAndGet());
return new DiscoveryNode(id, id, buildNewFakeTransportAddress(), Collections.emptyMap(), roles,
Version.CURRENT);
}
/**
* Checks that when nodes leave the cluster, the auto-expand-replica functionality only triggers after failing the shards on
* the removed nodes. This ensures that active shards on other live nodes are not failed if the primary resided on a now dead node.
* Instead, one of the replicas on the live nodes first gets promoted to primary, and the auto-expansion (removing replicas) only
* triggers in a follow-up step.
*/
public void testAutoExpandWhenNodeLeavesAndPossiblyRejoins() throws InterruptedException {
final ThreadPool threadPool = new TestThreadPool(getClass().getName());
final ClusterStateChanges cluster = new ClusterStateChanges(xContentRegistry(), threadPool);
try {
List<DiscoveryNode> allNodes = new ArrayList<>();
DiscoveryNode localNode = createNode(DiscoveryNode.Role.MASTER); // local node is the master
allNodes.add(localNode);
int numDataNodes = randomIntBetween(3, 5);
List<DiscoveryNode> dataNodes = new ArrayList<>(numDataNodes);
for (int i = 0; i < numDataNodes; i++) {
dataNodes.add(createNode(DiscoveryNode.Role.DATA));
}
allNodes.addAll(dataNodes);
ClusterState state = ClusterStateCreationUtils.state(localNode, localNode, allNodes.toArray(new DiscoveryNode[allNodes.size()]));
CreateIndexRequest request = new CreateIndexRequest("index",
Settings.builder()
.put(SETTING_NUMBER_OF_SHARDS, 1)
.put(SETTING_AUTO_EXPAND_REPLICAS, "0-all").build())
.waitForActiveShards(ActiveShardCount.NONE);
state = cluster.createIndex(state, request);
assertTrue(state.metaData().hasIndex("index"));
while (state.routingTable().index("index").shard(0).allShardsStarted() == false) {
logger.info(state);
state = cluster.applyStartedShards(state,
state.routingTable().index("index").shard(0).shardsWithState(ShardRoutingState.INITIALIZING));
state = cluster.reroute(state, new ClusterRerouteRequest());
}
IndexShardRoutingTable preTable = state.routingTable().index("index").shard(0);
final Set<String> unchangedNodeIds;
final IndexShardRoutingTable postTable;
if (randomBoolean()) {
// simulate node removal
List<DiscoveryNode> nodesToRemove = randomSubsetOf(2, dataNodes);
unchangedNodeIds = dataNodes.stream().filter(n -> nodesToRemove.contains(n) == false)
.map(DiscoveryNode::getId).collect(Collectors.toSet());
state = cluster.removeNodes(state, nodesToRemove);
postTable = state.routingTable().index("index").shard(0);
assertTrue("not all shards started in " + state.toString(), postTable.allShardsStarted());
assertThat(postTable.toString(), postTable.getAllAllocationIds(), everyItem(isIn(preTable.getAllAllocationIds())));
} else {
// fake an election where conflicting nodes are removed and re-added
state = ClusterState.builder(state).nodes(DiscoveryNodes.builder(state.nodes()).masterNodeId(null).build()).build();
List<DiscoveryNode> conflictingNodes = randomSubsetOf(2, dataNodes);
unchangedNodeIds = dataNodes.stream().filter(n -> conflictingNodes.contains(n) == false)
.map(DiscoveryNode::getId).collect(Collectors.toSet());
List<DiscoveryNode> nodesToAdd = conflictingNodes.stream()
.map(n -> new DiscoveryNode(n.getName(), n.getId(), buildNewFakeTransportAddress(), n.getAttributes(), n.getRoles(), n.getVersion()))
.collect(Collectors.toList());
if (randomBoolean()) {
nodesToAdd.add(createNode(DiscoveryNode.Role.DATA));
}
state = cluster.joinNodesAndBecomeMaster(state, nodesToAdd);
postTable = state.routingTable().index("index").shard(0);
}
Set<String> unchangedAllocationIds = preTable.getShards().stream().filter(shr -> unchangedNodeIds.contains(shr.currentNodeId()))
.map(shr -> shr.allocationId().getId()).collect(Collectors.toSet());
assertThat(postTable.toString(), unchangedAllocationIds, everyItem(isIn(postTable.getAllAllocationIds())));
postTable.getShards().forEach(
shardRouting -> {
if (shardRouting.assignedToNode() && unchangedAllocationIds.contains(shardRouting.allocationId().getId())) {
assertTrue("Shard should be active: " + shardRouting, shardRouting.active());
}
}
);
} finally {
terminate(threadPool);
}
}
} }

View File

@ -185,7 +185,7 @@ public class IndexCreationTaskTests extends ESTestCase {
public void testDefaultSettings() throws Exception { public void testDefaultSettings() throws Exception {
final ClusterState result = executeTask(); final ClusterState result = executeTask();
assertThat(result.getMetaData().index("test").getSettings().get(SETTING_NUMBER_OF_SHARDS), equalTo("5")); assertThat(result.getMetaData().index("test").getSettings().get(SETTING_NUMBER_OF_SHARDS), equalTo("1"));
} }
public void testSettingsFromClusterState() throws Exception { public void testSettingsFromClusterState() throws Exception {

View File

@ -56,6 +56,7 @@ import java.util.stream.Collectors;
import java.util.stream.Stream; import java.util.stream.Stream;
import static java.util.Collections.emptyMap; import static java.util.Collections.emptyMap;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED;
import static org.hamcrest.Matchers.endsWith; import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
@ -92,6 +93,21 @@ public class MetaDataCreateIndexServiceTests extends ESTestCase {
return source * x == target; return source * x == target;
} }
public void testNumberOfShards() {
{
final Version versionCreated = VersionUtils.randomVersionBetween(
random(),
Version.V_6_0_0_alpha1, VersionUtils.getPreviousVersion(Version.V_7_0_0_alpha1));
final Settings.Builder indexSettingsBuilder = Settings.builder().put(SETTING_VERSION_CREATED, versionCreated);
assertThat(MetaDataCreateIndexService.IndexCreationTask.getNumberOfShards(indexSettingsBuilder), equalTo(5));
}
{
final Version versionCreated = VersionUtils.randomVersionBetween(random(), Version.V_7_0_0_alpha1, Version.CURRENT);
final Settings.Builder indexSettingsBuilder = Settings.builder().put(SETTING_VERSION_CREATED, versionCreated);
assertThat(MetaDataCreateIndexService.IndexCreationTask.getNumberOfShards(indexSettingsBuilder), equalTo(1));
}
}
public void testValidateShrinkIndex() { public void testValidateShrinkIndex() {
int numShards = randomIntBetween(2, 42); int numShards = randomIntBetween(2, 42);
ClusterState state = createClusterState("source", numShards, randomIntBetween(0, 10), ClusterState state = createClusterState("source", numShards, randomIntBetween(0, 10),

View File

@ -48,11 +48,13 @@ import org.elasticsearch.common.Randomness;
import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.env.Environment; import org.elasticsearch.env.Environment;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
import org.hamcrest.Matchers;
import org.junit.After; import org.junit.After;
import org.junit.Before; import org.junit.Before;
import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.instanceOf;
public class KeyStoreWrapperTests extends ESTestCase { public class KeyStoreWrapperTests extends ESTestCase {
@ -97,6 +99,19 @@ public class KeyStoreWrapperTests extends ESTestCase {
assertTrue(keystore.getSettingNames().contains(KeyStoreWrapper.SEED_SETTING.getKey())); assertTrue(keystore.getSettingNames().contains(KeyStoreWrapper.SEED_SETTING.getKey()));
} }
public void testCannotReadStringFromClosedKeystore() throws Exception {
KeyStoreWrapper keystore = KeyStoreWrapper.create();
assertThat(keystore.getSettingNames(), Matchers.hasItem(KeyStoreWrapper.SEED_SETTING.getKey()));
assertThat(keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey()), notNullValue());
keystore.close();
assertThat(keystore.getSettingNames(), Matchers.hasItem(KeyStoreWrapper.SEED_SETTING.getKey()));
final IllegalStateException exception = expectThrows(IllegalStateException.class,
() -> keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey()));
assertThat(exception.getMessage(), containsString("closed"));
}
public void testUpgradeNoop() throws Exception { public void testUpgradeNoop() throws Exception {
KeyStoreWrapper keystore = KeyStoreWrapper.create(); KeyStoreWrapper keystore = KeyStoreWrapper.create();
SecureString seed = keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey()); SecureString seed = keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey());

View File

@ -87,6 +87,7 @@ import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportService; import org.elasticsearch.transport.TransportService;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import java.util.HashSet; import java.util.HashSet;
@ -232,6 +233,15 @@ public class ClusterStateChanges extends AbstractComponent {
return runTasks(joinTaskExecutor, clusterState, nodes); return runTasks(joinTaskExecutor, clusterState, nodes);
} }
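/** Simulates winning a master election: submits the become-master and finish-election sentinel tasks along with the joining nodes. */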
public ClusterState joinNodesAndBecomeMaster(ClusterState clusterState, List<DiscoveryNode> nodes) {
List<DiscoveryNode> joinNodes = new ArrayList<>();
joinNodes.add(NodeJoinController.BECOME_MASTER_TASK);
joinNodes.add(NodeJoinController.FINISH_ELECTION_TASK);
joinNodes.addAll(nodes);
return runTasks(joinTaskExecutor, clusterState, joinNodes);
}
public ClusterState removeNodes(ClusterState clusterState, List<DiscoveryNode> nodes) { public ClusterState removeNodes(ClusterState clusterState, List<DiscoveryNode> nodes) {
return runTasks(nodeRemovalExecutor, clusterState, nodes.stream() return runTasks(nodeRemovalExecutor, clusterState, nodes.stream()
.map(n -> new ZenDiscovery.NodeRemovalClusterStateTaskExecutor.Task(n, "dummy reason")).collect(Collectors.toList())); .map(n -> new ZenDiscovery.NodeRemovalClusterStateTaskExecutor.Task(n, "dummy reason")).collect(Collectors.toList()));

View File

@ -22,6 +22,8 @@ import org.elasticsearch.cluster.DiffableUtils;
import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.DeprecationHandler;
import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentBuilder;
@ -130,6 +132,45 @@ public class ScriptMetaDataTests extends AbstractSerializingTestCase<ScriptMetaD
assertEquals("1 + 1", result.getStoredScript("_id").getSource()); assertEquals("1 + 1", result.getStoredScript("_id").getSource());
} }
public void testLoadEmptyScripts() throws IOException {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.startObject().field("mustache#empty", "").endObject();
XContentParser parser = XContentType.JSON.xContent()
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
BytesReference.bytes(builder).streamInput());
ScriptMetaData.fromXContent(parser);
assertWarnings("empty templates should no longer be used");
builder = XContentFactory.jsonBuilder();
builder.startObject().field("lang#empty", "").endObject();
parser = XContentType.JSON.xContent()
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
BytesReference.bytes(builder).streamInput());
ScriptMetaData.fromXContent(parser);
assertWarnings("empty scripts should no longer be used");
builder = XContentFactory.jsonBuilder();
builder.startObject().startObject("script").field("lang", "lang").field("source", "").endObject().endObject();
parser = XContentType.JSON.xContent()
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
BytesReference.bytes(builder).streamInput());
ScriptMetaData.fromXContent(parser);
assertWarnings("empty scripts should no longer be used");
builder = XContentFactory.jsonBuilder();
builder.startObject().startObject("script").field("lang", "mustache").field("source", "").endObject().endObject();
parser = XContentType.JSON.xContent()
.createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
BytesReference.bytes(builder).streamInput());
ScriptMetaData.fromXContent(parser);
assertWarnings("empty templates should no longer be used");
}
@Override
protected boolean enableWarningsCheck() {
return true;
}
private ScriptMetaData randomScriptMetaData(XContentType sourceContentType, int minNumberScripts) throws IOException { private ScriptMetaData randomScriptMetaData(XContentType sourceContentType, int minNumberScripts) throws IOException {
ScriptMetaData.Builder builder = new ScriptMetaData.Builder(null); ScriptMetaData.Builder builder = new ScriptMetaData.Builder(null);
int numScripts = scaledRandomIntBetween(minNumberScripts, 32); int numScripts = scaledRandomIntBetween(minNumberScripts, 32);

View File

@ -58,7 +58,7 @@ public class StoredScriptSourceTests extends AbstractSerializingTestCase<StoredS
@Override @Override
protected StoredScriptSource doParseInstance(XContentParser parser) { protected StoredScriptSource doParseInstance(XContentParser parser) {
return StoredScriptSource.fromXContent(parser); return StoredScriptSource.fromXContent(parser, false);
} }
@Override @Override

View File

@@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.AbstractSerializingTestCase;
+import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@@ -204,6 +205,39 @@ public class StoredScriptTests extends AbstractSerializingTestCase<StoredScriptS
}
}
+public void testEmptyTemplateDeprecations() throws IOException {
+try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) {
+builder.startObject().endObject();
+StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON);
+StoredScriptSource source = new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, "", Collections.emptyMap());
+assertThat(parsed, equalTo(source));
+assertWarnings("empty templates should no longer be used");
+}
+try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) {
+builder.startObject().field("template", "").endObject();
+StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON);
+StoredScriptSource source = new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, "", Collections.emptyMap());
+assertThat(parsed, equalTo(source));
+assertWarnings("empty templates should no longer be used");
+}
+try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) {
+builder.startObject().field("script").startObject().field("lang", "mustache")
+.field("source", "").endObject().endObject();
+StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON);
+StoredScriptSource source = new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, "", Collections.emptyMap());
+assertThat(parsed, equalTo(source));
+assertWarnings("empty templates should no longer be used");
+}
+}
@Override
protected StoredScriptSource createTestInstance() {
return new StoredScriptSource(
@@ -219,7 +253,7 @@ public class StoredScriptTests extends AbstractSerializingTestCase<StoredScriptS
@Override
protected StoredScriptSource doParseInstance(XContentParser parser) {
-return StoredScriptSource.fromXContent(parser);
+return StoredScriptSource.fromXContent(parser, false);
}
@Override

View File

@@ -84,7 +84,7 @@ public class InternalExtendedStatsTests extends InternalAggregationTestCase<Inte
assertEquals(expectedCount, reduced.getCount());
// The order in which you add double values in java can give different results. The difference can
// be larger for large sum values, so we make the delta in the assertion depend on the values magnitude
-assertEquals(expectedSum, reduced.getSum(), Math.abs(expectedSum) * 1e-11);
+assertEquals(expectedSum, reduced.getSum(), Math.abs(expectedSum) * 1e-10);
assertEquals(expectedMin, reduced.getMin(), 0d);
assertEquals(expectedMax, reduced.getMax(), 0d);
// summing squared values, see reason for delta above
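The widened tolerance follows directly from the comment above: double addition is not associative, so a reduce that sums shard results in a different order can drift by an amount that grows with the magnitude of the sum, which is why the delta is relative rather than fixed. A minimal, self-contained illustration (the values are arbitrary, chosen only to make the effect visible):

[source,java]
--------------------------------------------------
public class SumOrderDemo {
    public static void main(String[] args) {
        double big = 1e16;
        double small = 1.0;
        // 1.0 is only half an ulp at this magnitude, so each lone addition
        // rounds back down to the even neighbor and is lost.
        double leftToRight = (big + small) + small; // 1.0E16
        // Grouping the small values first lets them survive the final rounding.
        double smallFirst = big + (small + small);  // 1.0000000000000002E16
        System.out.println(leftToRight == smallFirst); // false: order changed the result
    }
}
--------------------------------------------------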

View File

@@ -19,9 +19,7 @@
package org.elasticsearch.search.aggregations.pipeline.moving.avg;
-import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
-import org.elasticsearch.action.bulk.BulkResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
@@ -45,7 +43,6 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuil
import org.elasticsearch.test.ESIntegTestCase;
import org.hamcrest.Matchers;
-import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -69,7 +66,6 @@ import static org.hamcrest.core.IsNull.notNullValue;
import static org.hamcrest.core.IsNull.nullValue;
@ESIntegTestCase.SuiteScopeTestCase
-@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29456")
public class MovAvgIT extends ESIntegTestCase {
private static final String INTERVAL_FIELD = "l_value";
private static final String VALUE_FIELD = "v_value";
@@ -1308,7 +1304,7 @@ public class MovAvgIT extends ESIntegTestCase {
} else {
assertThat("[value] movavg is null", valuesMovAvg, notNullValue());
assertEquals("[value] movavg does not match expected [" + valuesMovAvg.value() + " vs " + expectedValue + "]",
-valuesMovAvg.value(), expectedValue, 0.1 * Math.abs(countMovAvg.value()));
+valuesMovAvg.value(), expectedValue, 0.1 * Math.abs(valuesMovAvg.value()));
}
}

View File

@@ -21,7 +21,10 @@ package org.elasticsearch.test.rest.yaml;
import com.carrotsearch.randomizedtesting.RandomizedTest;
import org.apache.http.HttpHost;
+import org.apache.http.entity.StringEntity;
+import org.apache.http.message.BasicHeader;
import org.elasticsearch.Version;
+import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
@@ -29,6 +32,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi;
import org.elasticsearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec;
@@ -38,6 +42,7 @@ import org.elasticsearch.test.rest.yaml.section.DoSection;
import org.elasticsearch.test.rest.yaml.section.ExecutableSection;
import org.junit.AfterClass;
import org.junit.Before;
+import org.junit.BeforeClass;
import java.io.IOException;
import java.nio.file.Files;
@@ -94,6 +99,13 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase {
this.testCandidate = testCandidate;
}
+private static boolean useDefaultNumberOfShards;
+@BeforeClass
+public static void initializeUseDefaultNumberOfShards() {
+useDefaultNumberOfShards = usually();
+}
@Before
public void initAndResetContext() throws Exception {
if (restTestExecutionContext == null) {
@@ -318,6 +330,14 @@ public abstract class ESClientYamlSuiteTestCase extends ESRestTestCase {
throw new IllegalArgumentException("No executable sections loaded for [" + testCandidate.getTestPath() + "]");
}
+if (useDefaultNumberOfShards == false
+&& testCandidate.getTestSection().getSkipSection().getFeatures().contains("default_shards") == false) {
+final Request request = new Request("PUT", "/_template/global");
+request.setHeaders(new BasicHeader("Content-Type", XContentType.JSON.mediaTypeWithoutParameters()));
+request.setEntity(new StringEntity("{\"index_patterns\":[\"*\"],\"settings\":{\"index.number_of_shards\":2}}"));
+adminClient().performRequest(request);
+}
if (!testCandidate.getSetupSection().isEmpty()) {
logger.debug("start setup test [{}]", testCandidate.getTestPath());
for (DoSection doSection : testCandidate.getSetupSection().getDoSections()) {
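In short: most runs now install a catch-all index template that bumps `index.number_of_shards` to 2, so YAML tests only see the one-shard default when `usually()` says so or when they declare the new `default_shards` feature. A standalone sketch of the same template request against a local node using the low-level REST client (the host, port, and `try`-with-resources wrapper are illustrative assumptions, not part of the test framework):

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class GlobalShardTemplateSketch {
    public static void main(String[] args) throws Exception {
        // Assumed local node; adjust the host and port for your cluster.
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("PUT", "/_template/global");
            // Same body the suite sends: match every index, default to two shards.
            request.setEntity(new StringEntity(
                "{\"index_patterns\":[\"*\"],\"settings\":{\"index.number_of_shards\":2}}",
                ContentType.APPLICATION_JSON));
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------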

View File

@@ -37,6 +37,7 @@ import static java.util.Collections.unmodifiableList;
public final class Features {
private static final List<String> SUPPORTED = unmodifiableList(Arrays.asList(
"catch_unauthorized",
+"default_shards",
"embedded_stash_key",
"headers",
"stash_in_key",

View File

@@ -81,7 +81,7 @@ buildRestTests.expectedUnconvertedCandidates = [
'en/rest-api/ml/validate-job.asciidoc',
'en/rest-api/security/authenticate.asciidoc',
'en/rest-api/watcher/stats.asciidoc',
-'en/security/authorization.asciidoc',
+'en/security/authorization/overview.asciidoc',
'en/watcher/example-watches/watching-time-series-data.asciidoc',
]

View File

@@ -43,7 +43,7 @@ environment variable.
=== Examples
The following command generates a `system_key` file in the
-default `$ES_HOME/config/x-pack` directory:
+default `$ES_HOME/config` directory:
[source, sh]
--------------------------------------------------

View File

@@ -62,11 +62,11 @@ roles provide these privileges. For more information, see
==== Examples
The following example gets information about one category for the
-`it_ops_new_logs` job:
+`esxi_log` job:
[source,js]
--------------------------------------------------
-GET _xpack/ml/anomaly_detectors/it_ops_new_logs/results/categories
+GET _xpack/ml/anomaly_detectors/esxi_log/results/categories
{
"page":{
"size": 1
@@ -83,14 +83,18 @@ In this example, the API returns the following information:
"count": 11,
"categories": [
{
-"job_id": "it_ops_new_logs",
-"category_id": 1,
-"terms": "Actual Transaction Already Voided Reversed hostname dbserver.acme.com physicalhost esxserver1.acme.com vmhost app1.acme.com",
-"regex": ".*?Actual.+?Transaction.+?Already.+?Voided.+?Reversed.+?hostname.+?dbserver.acme.com.+?physicalhost.+?esxserver1.acme.com.+?vmhost.+?app1.acme.com.*",
-"max_matching_length": 137,
-"examples": [
-"Actual Transaction Already Voided / Reversed;hostname=dbserver.acme.com;physicalhost=esxserver1.acme.com;vmhost=app1.acme.com"
-]
+"job_id" : "esxi_log",
+"category_id" : 1,
+"terms" : "Vpxa verbose vpxavpxaInvtVm opID VpxaInvtVmChangeListener Guest DiskInfo Changed",
+"regex" : ".*?Vpxa.+?verbose.+?vpxavpxaInvtVm.+?opID.+?VpxaInvtVmChangeListener.+?Guest.+?DiskInfo.+?Changed.*",
+"max_matching_length": 154,
+"examples" : [
+"Oct 19 17:04:44 esxi1.acme.com Vpxa: [3CB3FB90 verbose 'vpxavpxaInvtVm' opID=WFU-33d82c31] [VpxaInvtVmChangeListener] Guest DiskInfo Changed",
+"Oct 19 17:04:45 esxi2.acme.com Vpxa: [3CA66B90 verbose 'vpxavpxaInvtVm' opID=WFU-33927856] [VpxaInvtVmChangeListener] Guest DiskInfo Changed",
+"Oct 19 17:04:51 esxi1.acme.com Vpxa: [FFDBAB90 verbose 'vpxavpxaInvtVm' opID=WFU-25e0d447] [VpxaInvtVmChangeListener] Guest DiskInfo Changed",
+"Oct 19 17:04:58 esxi2.acme.com Vpxa: [FFDDBB90 verbose 'vpxavpxaInvtVm' opID=WFU-bbff0134] [VpxaInvtVmChangeListener] Guest DiskInfo Changed"
+],
+"grok_pattern" : ".*?%{SYSLOGTIMESTAMP:timestamp}.+?Vpxa.+?%{BASE16NUM:field}.+?verbose.+?vpxavpxaInvtVm.+?opID.+?VpxaInvtVmChangeListener.+?Guest.+?DiskInfo.+?Changed.*"
}
]
}

View File

@@ -405,6 +405,13 @@ A category resource has the following properties:
`examples`::
(array) A list of examples of actual values that matched the category.
+`grok_pattern`::
+experimental[] (string) A Grok pattern that could be used in Logstash or an
+Ingest Pipeline to extract fields from messages that match the category. This
+field is experimental and may be changed or removed in a future release. The
+Grok patterns that are found are not optimal, but are often a good starting
+point for manual tweaking.
`job_id`::
(string) The unique identifier for the job that these results belong to.
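To make the new field concrete: a hedged sketch of feeding a category's `grok_pattern` into an ingest pipeline via the low-level REST client. The pipeline id, source field, and the shortened pattern are illustrative assumptions; a real pattern would be copied from the category result (and, per the description above, likely hand-tuned first):

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class GrokPipelineSketch {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("PUT", "/_ingest/pipeline/categorized-logs");
            // Grok processor whose pattern came from a category's grok_pattern field.
            request.setJsonEntity("{"
                + "\"description\": \"fields suggested by an ML category\","
                + "\"processors\": [ { \"grok\": {"
                + "  \"field\": \"message\","
                + "  \"patterns\": [ \".*?%{SYSLOGTIMESTAMP:timestamp}.+?Guest.+?DiskInfo.+?Changed.*\" ]"
                + "} } ] }");
            client.performRequest(request);
        }
    }
}
--------------------------------------------------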

View File

@@ -1,5 +1,6 @@
+[role="xpack"]
[[auditing]]
-== Auditing Security Events
+== Auditing security events
You can enable auditing to keep track of security-related events such as
authentication failures and refused connections. Logging these events enables you
@@ -40,7 +41,7 @@ events are pushed to the index by setting
[float]
[[audit-event-types]]
-=== Audit Event Types
+=== Audit event types
Each request may generate multiple audit events.
The following is a list of the events that can be generated:
@@ -81,11 +82,11 @@ The following is a list of the events that can be generated:
[float]
[[audit-event-attributes]]
-=== Audit Event Attributes
+=== Audit event attributes
The following table shows the common attributes that can be associated with every event.
-.Common Attributes
+.Common attributes
[cols="2,7",options="header"]
|======
| Attribute | Description
@@ -103,7 +104,7 @@ The following table shows the common attributes that can be associated with ever
The following tables show the attributes that can be associated with each type of event.
The log level determines which attributes are included in a log entry.
-.REST anonymous_access_denied Attributes
+.REST anonymous_access_denied attributes
[cols="2,7",options="header"]
|======
| Attribute | Description
@@ -112,7 +113,7 @@ The log level determines which attributes are included in a log entry.
| `request_body` | The body of the request, if enabled.
|======
-.REST authentication_success Attributes
+.REST authentication_success attributes
[cols="2,7",options="header"]
|======
| Attribute | Description
@@ -123,7 +124,7 @@ The log level determines which attributes are included in a log entry.
| `request_body` | The body of the request, if enabled.
|======
-.REST authentication_failed Attributes
+.REST authentication_failed attributes
[cols="2,7",options="header"]
|======
| Attribute | Description
@@ -133,7 +134,7 @@ The log level determines which attributes are included in a log entry.
| `request_body` | The body of the request, if enabled.
|======
-.REST realm_authentication_failed Attributes
+.REST realm_authentication_failed attributes
[cols="2,7",options="header"]
|======
| Attribute | Description
@@ -146,7 +147,7 @@ The log level determines which attributes are included in a log entry.
consulted realm.
|======
-.Transport anonymous_access_denied Attributes
+.Transport anonymous_access_denied attributes
[cols="2,7",options="header"]
|======
| Attribute | Description
@@ -161,7 +162,7 @@ The log level determines which attributes are included in a log entry.
pertains to (when applicable).
|======
-.Transport authentication_success Attributes
+.Transport authentication_success attributes
[cols="2,7",options="header"]
|======
| Attribute | Description
@@ -176,7 +177,7 @@ The log level determines which attributes are included in a log entry.
| `request` | The type of request that was executed.
|======
-.Transport authentication_failed Attributes
+.Transport authentication_failed attributes
[cols="2,7",options="header"]
|======
| Attribute | Description
@@ -192,7 +193,7 @@ The log level determines which attributes are included in a log entry.
pertains to (when applicable).
|======
-.Transport realm_authentication_failed Attributes
+.Transport realm_authentication_failed attributes
[cols="2,7",options="header"]
|======
| Attribute | Description
@@ -211,7 +212,7 @@ The log level determines which attributes are included in a log entry.
consulted realm.
|======
-.Transport access_granted Attributes
+.Transport access_granted attributes
[cols="2,7",options="header"]
|======
| Attribute | Description
@@ -228,7 +229,7 @@ The log level determines which attributes are included in a log entry.
pertains to (when applicable).
|======
-.Transport access_denied Attributes
+.Transport access_denied attributes
[cols="2,7",options="header"]
|======
| Attribute | Description
@@ -245,7 +246,7 @@ The log level determines which attributes are included in a log entry.
relates to (when applicable).
|======
-.Transport tampered_request Attributes
+.Transport tampered_request attributes
[cols="2,7",options="header"]
|======
| Attribute | Description
@@ -261,7 +262,7 @@ The log level determines which attributes are included in a log entry.
pertains to (when applicable).
|======
-.IP Filter connection_granted Attributes
+.IP filter connection_granted attributes
[cols="2,7",options="header"]
|======
| Attribute | Description
@@ -271,7 +272,7 @@ The log level determines which attributes are included in a log entry.
the request.
|======
-.IP Filter connection_denied Attributes
+.IP filter connection_denied attributes
[cols="2,7",options="header"]
|======
| Attribute | Description
@@ -283,14 +284,14 @@ The log level determines which attributes are included in a log entry.
[float]
[[audit-log-output]]
-=== Logfile Audit Output
+=== Logfile audit output
The `logfile` audit output is the default output for auditing. It writes data to
the `<clustername>_access.log` file in the logs directory.
[float]
[[audit-log-entry-format]]
-=== Log Entry Format
+=== Log entry format
The format of a log entry is:
@@ -318,7 +319,7 @@ The format of a log entry is:
[float]
[[audit-log-settings]]
-=== Logfile Output Settings
+=== Logfile output settings
The events and some other information about what gets logged can be
controlled using settings in the `elasticsearch.yml` file. See
@@ -330,13 +331,13 @@ audited in plain text when including the request body in audit events.
[[logging-file]]
You can also configure how the logfile is written in the `log4j2.properties`
-file located in `CONFIG_DIR/x-pack`. By default, audit information is appended to the
+file located in `CONFIG_DIR`. By default, audit information is appended to the
`<clustername>_access.log` file located in the standard Elasticsearch `logs` directory
(typically located at `$ES_HOME/logs`). The file rolls over on a daily basis.
[float]
[[audit-log-ignore-policy]]
-=== Logfile Audit Events Ignore Policies
+=== Logfile audit events ignore policies
The comprehensive audit trail is necessary to ensure accountability. It offers tremendous
value during incident response and can even be required for demonstrating compliance.
@@ -414,7 +415,7 @@ xpack.security.audit.logfile.events.ignore_filters:
[float]
[[audit-index]]
-=== Index Audit Output
+=== Index audit output
In addition to logging to a file, you can store audit logs in Elasticsearch
rolling indices. These indices can be either on the same cluster, or on a
@@ -429,13 +430,13 @@ xpack.security.audit.outputs: [ index, logfile ]
----------------------------
For more configuration options, see
-{ref}/auditing-settings.html#index-audit-settings[Audit Log Indexing Configuration Settings].
+{ref}/auditing-settings.html#index-audit-settings[Audit log indexing configuration settings].
IMPORTANT: No filtering is performed when auditing, so sensitive data may be
audited in plain text when including the request body in audit events.
[float]
-==== Audit Index Settings
+==== Audit index settings
You can also configure settings for the indices that the events are stored in.
These settings are configured in the `xpack.security.audit.index.settings` namespace
@@ -451,7 +452,7 @@ xpack.security.audit.index.settings:
----------------------------
[float]
-==== Forwarding Audit Logs to a Remote Cluster
+==== Forwarding audit logs to a remote cluster
To index audit events to a remote Elasticsearch cluster, you configure
the following `xpack.security.audit.index.client` settings:

View File

@@ -1,3 +1,4 @@
+[role="xpack"]
[[active-directory-realm]]
=== Active Directory user authentication

View File

@@ -1,5 +1,6 @@
+[role="xpack"]
[[anonymous-access]]
-=== Enabling Anonymous Access
+=== Enabling anonymous access
Incoming requests are considered to be _anonymous_ if no authentication token
can be extracted from the incoming request. By default, anonymous requests are rejected and an authentication error is returned (status code `401`).

Some files were not shown because too many files have changed in this diff