Merge remote-tracking branch 'es/master' into ccr

* es/master:
  Add remote cluster client (#29495)
  Ensure flush happens on shard idle
  Adds SpanGapQueryBuilder in the query DSL (#28636)
  Control max size and count of warning headers (#28427)
  Make index APIs work without types. (#29479)
  Deprecate filtering on `_type`. (#29468)
  Fix auto-generated ID example format (#29461)
  Fix typo in max number of threads check docs (#29469)
  Add primary term to translog header (#29227)
  Add a helper method to get a random java.util.TimeZone (#29487)
  Move TimeValue into elasticsearch-core project (#29486)
  Fix NPE in InternalGeoCentroidTests#testReduceRandom (#29481)
  Build: introduce keystoreFile for cluster config (#29491)
  test: Index more docs, so that it is less likely that the search request times out.
Martijn van Groningen 2018-04-13 15:31:43 +02:00
commit 9da3e739fb
98 changed files with 1722 additions and 678 deletions


@ -141,6 +141,8 @@ class ClusterConfiguration {
Map<String, String> keystoreSettings = new HashMap<>()
Map<String, Object> keystoreFiles = new HashMap<>()
// map from destination path, to source file
Map<String, Object> extraConfigFiles = new HashMap<>()
@ -167,6 +169,15 @@ class ClusterConfiguration {
keystoreSettings.put(name, value)
}
/**
* Adds a file to the keystore. The name is the secure setting name, and the sourceFile
* is anything accepted by project.file()
*/
@Input
void keystoreFile(String name, Object sourceFile) {
keystoreFiles.put(name, sourceFile)
}
@Input
void plugin(String path) {
Project pluginProject = project.project(path)


@ -180,6 +180,7 @@ class ClusterFormationTasks {
setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, seedNode)
setup = configureCreateKeystoreTask(taskName(prefix, node, 'createKeystore'), project, setup, node)
setup = configureAddKeystoreSettingTasks(prefix, project, setup, node)
setup = configureAddKeystoreFileTasks(prefix, project, setup, node)
if (node.config.plugins.isEmpty() == false) {
if (node.nodeVersion == VersionProperties.elasticsearch) {
@ -323,7 +324,7 @@ class ClusterFormationTasks {
/** Adds a task to create keystore */
static Task configureCreateKeystoreTask(String name, Project project, Task setup, NodeInfo node) {
if (node.config.keystoreSettings.isEmpty()) {
if (node.config.keystoreSettings.isEmpty() && node.config.keystoreFiles.isEmpty()) {
return setup
} else {
/*
@ -357,6 +358,37 @@ class ClusterFormationTasks {
return parentTask
}
/** Adds tasks to add files to the keystore */
static Task configureAddKeystoreFileTasks(String parent, Project project, Task setup, NodeInfo node) {
Map<String, Object> kvs = node.config.keystoreFiles
if (kvs.isEmpty()) {
return setup
}
Task parentTask = setup
/*
* We have to delay building the string because the path does not exist during configuration; on Windows, resolving
* the short name requires the path to already exist.
*/
final Object esKeystoreUtil = "${-> node.binPath().resolve('elasticsearch-keystore').toString()}"
for (Map.Entry<String, Object> entry in kvs) {
String key = entry.getKey()
String name = taskName(parent, node, 'addToKeystore#' + key)
String srcFileName = entry.getValue()
Task t = configureExecTask(name, project, parentTask, node, esKeystoreUtil, 'add-file', key, srcFileName)
t.doFirst {
File srcFile = project.file(srcFileName)
if (srcFile.isDirectory()) {
throw new GradleException("Source for keystoreFile must be a file: ${srcFile}")
}
if (srcFile.exists() == false) {
throw new GradleException("Source file for keystoreFile does not exist: ${srcFile}")
}
}
parentTask = t
}
return parentTask
}
static Task configureExtraConfigFilesTask(String name, Project project, Task setup, NodeInfo node) {
if (node.config.extraConfigFiles.isEmpty()) {
return setup


@ -239,7 +239,7 @@ their last name:
[source,js]
----------------------------------------------------------------
POST hockey/player/_update_by_query
POST hockey/_update_by_query
{
"script": {
"lang": "painless",
@ -260,7 +260,7 @@ names start with a consonant and end with a vowel:
[source,js]
----------------------------------------------------------------
POST hockey/player/_update_by_query
POST hockey/_update_by_query
{
"script": {
"lang": "painless",
@ -281,7 +281,7 @@ remove all of the vowels in all of their last names:
[source,js]
----------------------------------------------------------------
POST hockey/player/_update_by_query
POST hockey/_update_by_query
{
"script": {
"lang": "painless",
@ -297,7 +297,7 @@ method so it supports `$1` and `\1` for replacements:
[source,js]
----------------------------------------------------------------
POST hockey/player/_update_by_query
POST hockey/_update_by_query
{
"script": {
"lang": "painless",
@ -319,7 +319,7 @@ This will make all of the vowels in the hockey player's last names upper case:
[source,js]
----------------------------------------------------------------
POST hockey/player/_update_by_query
POST hockey/_update_by_query
{
"script": {
"lang": "painless",
@ -337,7 +337,7 @@ last names upper case:
[source,js]
----------------------------------------------------------------
POST hockey/player/_update_by_query
POST hockey/_update_by_query
{
"script": {
"lang": "painless",


@ -38,7 +38,7 @@ Example:
[source,js]
--------------------------------------------------
GET news/article/_search
GET news/_search
{
"query" : {
"match" : {"content" : "Bird flu"}
@ -153,7 +153,7 @@ We can drill down into examples of these documents to see why pozmantier is conn
[source,js]
--------------------------------------------------
GET news/article/_search
GET news/_search
{
"query": {
"simple_query_string": {
@ -221,7 +221,7 @@ with the `filter_duplicate_text` setting turned on:
[source,js]
--------------------------------------------------
GET news/article/_search
GET news/_search
{
"query": {
"match": {
@ -424,7 +424,7 @@ context:
[source,js]
--------------------------------------------------
GET news/article/_search
GET news/_search
{
"query" : {
"match" : {
@ -463,7 +463,7 @@ will be analyzed using the `source_fields` parameter:
[source,js]
--------------------------------------------------
GET news/article/_search
GET news/_search
{
"query" : {
"match" : {


@ -217,7 +217,7 @@ had a value.
[source,js]
--------------------------------------------------
GET latency/data/_search
GET latency/_search
{
"size": 0,
"aggs" : {


@ -75,7 +75,7 @@ Back to the API format, this will delete tweets from the `twitter` index:
[source,js]
--------------------------------------------------
POST twitter/_doc/_delete_by_query?conflicts=proceed
POST twitter/_delete_by_query?conflicts=proceed
{
"query": {
"match_all": {}
@ -85,12 +85,12 @@ POST twitter/_doc/_delete_by_query?conflicts=proceed
// CONSOLE
// TEST[setup:twitter]
It's also possible to delete documents of multiple indexes and multiple
types at once, just like the search API:
It's also possible to delete documents of multiple indexes at once, just like
the search API:
[source,js]
--------------------------------------------------
POST twitter,blog/_docs,post/_delete_by_query
POST twitter,blog/_delete_by_query
{
"query": {
"match_all": {}


@ -229,14 +229,14 @@ The result of the above index operation is:
},
"_index" : "twitter",
"_type" : "_doc",
"_id" : "6a8ca01c-7896-48e9-81cc-9f70661fcb32",
"_id" : "W0tpsmIBdwcYyG50zbta",
"_version" : 1,
"_seq_no" : 0,
"_primary_term" : 1,
"result": "created"
}
--------------------------------------------------
// TESTRESPONSE[s/6a8ca01c-7896-48e9-81cc-9f70661fcb32/$body._id/ s/"successful" : 2/"successful" : 1/]
// TESTRESPONSE[s/W0tpsmIBdwcYyG50zbta/$body._id/ s/"successful" : 2/"successful" : 1/]
[float]
[[index-routing]]


@ -67,7 +67,7 @@ Back to the API format, this will update tweets from the `twitter` index:
[source,js]
--------------------------------------------------
POST twitter/_doc/_update_by_query?conflicts=proceed
POST twitter/_update_by_query?conflicts=proceed
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
@ -145,12 +145,12 @@ This API doesn't allow you to move the documents it touches, just modify their
source. This is intentional! We've made no provisions for removing the document
from its original location.
It's also possible to do this whole thing on multiple indexes and multiple
types at once, just like the search API:
It's also possible to do this whole thing on multiple indexes at once, just
like the search API:
[source,js]
--------------------------------------------------
POST twitter,blog/_doc,post/_update_by_query
POST twitter,blog/_update_by_query
--------------------------------------------------
// CONSOLE
// TEST[s/^/PUT twitter\nPUT blog\n/]


@ -82,4 +82,4 @@ Enable or disable allocation for persistent tasks:
This setting does not affect the persistent tasks that are already being executed.
Only newly created persistent tasks, or tasks that must be reassigned (after a node
left the cluster, for example), are impacted by this setting.
--
--


@ -20,7 +20,7 @@ http://en.wikipedia.org/wiki/Chunked_transfer_encoding[HTTP chunking].
The settings in the table below can be configured for HTTP. Note that none of
them are dynamically updatable so for them to take effect they should be set in
`elasticsearch.yml`.
the Elasticsearch <<settings, configuration file>>.
[cols="<,<",options="header",]
|=======================================================================
@ -100,6 +100,12 @@ simple message will be returned. Defaults to `true`
|`http.pipelining.max_events` |The maximum number of events to be queued up in memory before a HTTP connection is closed, defaults to `10000`.
|`http.max_warning_header_count` |The maximum number of warning headers in
client HTTP responses, defaults to unbounded.
|`http.max_warning_header_size` |The maximum total size of warning headers in
client HTTP responses, defaults to unbounded.
|=======================================================================
It also uses the common
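A minimal sketch of how the two new limits might be applied when building node settings (the values here are illustrative; both settings default to `-1`, meaning unbounded):

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;

public final class WarningHeaderSettingsExample {
    /** Caps warning headers at 100 entries and 8kb total per response. */
    static Settings cappedWarningHeaders() {
        return Settings.builder()
                .put("http.max_warning_header_count", 100)
                .put("http.max_warning_header_size", "8kb")
                .build();
    }
}
--------------------------------------------------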


@ -12,8 +12,7 @@ that match the query. The query can either be provided using a simple
All search APIs can be applied across multiple types within an index, and
across multiple indices with support for the
<<multi-index,multi index syntax>>. For
example, we can search on all documents across all types within the
twitter index:
example, we can search on all documents within the twitter index:
[source,js]
--------------------------------------------------
@ -22,15 +21,6 @@ GET /twitter/_search?q=user:kimchy
// CONSOLE
// TEST[setup:twitter]
We can also search within specific types:
[source,js]
--------------------------------------------------
GET /twitter/tweet,user/_search?q=user:kimchy
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
We can also search all tweets with a certain tag across several indices
(for example, when each user has his own index):


@ -114,7 +114,7 @@ that the Elasticsearch process has the rights to create enough threads
under normal use. This check is enforced only on Linux. If you are on
Linux, to pass the maximum number of threads check, you must configure
your system to allow the Elasticsearch process the ability to create at
least 2048 threads. This can be done via `/etc/security/limits.conf`
least 4096 threads. This can be done via `/etc/security/limits.conf`
using the `nproc` setting (note that you might have to increase the
limits for the `root` user too).
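
For example, a typical `/etc/security/limits.conf` entry for a dedicated `elasticsearch` user might look like this (the user name is illustrative; `-` sets both the soft and hard limit):

[source,txt]
--------------------------------------------------
elasticsearch  -  nproc  4096
--------------------------------------------------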


@ -19,15 +19,12 @@
package org.elasticsearch.common.unit;
import org.elasticsearch.common.xcontent.ToXContentFragment;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
import java.util.Locale;
import java.util.Objects;
import java.util.concurrent.TimeUnit;
public class TimeValue implements Comparable<TimeValue>, ToXContentFragment {
public class TimeValue implements Comparable<TimeValue> {
/** How many nano-seconds in one milli-second */
public static final long NSEC_PER_MSEC = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS);
@ -352,9 +349,4 @@ public class TimeValue implements Comparable<TimeValue>, ToXContentFragment {
double otherValue = ((double) timeValue.duration) * timeValue.timeUnit.toNanos(1);
return Double.compare(thisValue, otherValue);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder.value(toString());
}
}


@ -19,15 +19,10 @@
package org.elasticsearch.common.unit;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.common.unit.TimeValue.timeValueNanos;
import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.Matchers.containsString;
@ -154,31 +149,6 @@ public class TimeValueTests extends ESTestCase {
return randomFrom("nanos", "micros", "ms", "s", "m", "h", "d");
}
private void assertEqualityAfterSerialize(TimeValue value, int expectedSize) throws IOException {
BytesStreamOutput out = new BytesStreamOutput();
out.writeTimeValue(value);
assertEquals(expectedSize, out.size());
StreamInput in = out.bytes().streamInput();
TimeValue inValue = in.readTimeValue();
assertThat(inValue, equalTo(value));
assertThat(inValue.duration(), equalTo(value.duration()));
assertThat(inValue.timeUnit(), equalTo(value.timeUnit()));
}
public void testSerialize() throws Exception {
assertEqualityAfterSerialize(new TimeValue(100, TimeUnit.DAYS), 3);
assertEqualityAfterSerialize(timeValueNanos(-1), 2);
assertEqualityAfterSerialize(timeValueNanos(1), 2);
assertEqualityAfterSerialize(timeValueSeconds(30), 2);
final TimeValue timeValue = new TimeValue(randomIntBetween(0, 1024), randomFrom(TimeUnit.values()));
BytesStreamOutput out = new BytesStreamOutput();
out.writeZLong(timeValue.duration());
assertEqualityAfterSerialize(timeValue, 1 + out.bytes().length());
}
public void testFailOnUnknownUnits() {
try {
TimeValue.parseTimeValue("23tw", null, "test");


@ -35,14 +35,12 @@
- do:
search:
index: empty_bucket_idx
type: test
- match: {hits.total: 2}
- do:
search:
index: empty_bucket_idx
type: test
body: {"aggs": {"histo": {"histogram": {"field": "val1", "interval": 1, "min_doc_count": 0}, "aggs": { "mfs" : { "matrix_stats": {"fields": ["value", "val1"]} } } } } }
- match: {hits.total: 2}


@ -130,7 +130,6 @@ setup:
- do:
search:
index: unmapped
type: test
body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "val3"]} } } }
- match: {hits.total: 0}
@ -142,7 +141,6 @@ setup:
- do:
search:
index: test
type: test
body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val3"]} } } }
- match: {hits.total: 15}
@ -155,7 +153,6 @@ setup:
- do:
search:
index: [test, unmapped]
type: test
body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "val3"]} } } }
- match: {hits.total: 15}
@ -169,7 +166,6 @@ setup:
- do:
search:
index: [test, unmapped]
type: test
body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "val3"], "missing" : {"val2" : 10} } } } }
- match: {hits.total: 15}
@ -184,7 +180,6 @@ setup:
catch: /parsing_exception/
search:
index: test
type: test
body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2"], "script" : { "my_script" : {"source" : "1 + doc['val1'].value", "lang" : "js"} } } } } }
---
@ -194,5 +189,4 @@ setup:
catch: /parsing_exception/
search:
index: test
type: test
body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "val3"], "script" : { "my_script" : {"source" : "my_var + doc['val1'].value", "params" : { "my_var" : 1 }, "lang" : "js" } } } } } }


@ -130,7 +130,6 @@ setup:
- do:
search:
index: unmapped
type: test
body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "vals"]} } } }
- match: {hits.total: 0}
@ -142,7 +141,6 @@ setup:
- do:
search:
index: test
type: test
body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "vals"], "mode" : "max"} } } }
- match: {hits.total: 15}
@ -156,7 +154,6 @@ setup:
- do:
search:
index: test
type: test
body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "vals"], "mode" : "min"} } } }
- match: {hits.total: 15}
@ -170,7 +167,6 @@ setup:
- do:
search:
index: [test, unmapped]
type: test
body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "vals"]} } } }
- match: {hits.total: 15}
@ -184,7 +180,6 @@ setup:
- do:
search:
index: [test, unmapped]
type: test
body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "vals"], "missing" : {"val2" : 10, "vals" : 5 } } } } }
- match: {hits.total: 15}
@ -199,7 +194,6 @@ setup:
catch: /parsing_exception/
search:
index: test
type: test
body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["vals", "val3"], "script" : { "my_script" : {"source" : "1 + doc['val1'].value", "lang" : "js"} } } } } }
---
@ -209,5 +203,4 @@ setup:
catch: /parsing_exception/
search:
index: test
type: test
body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val3", "vals"], "script" : { "my_script" : {"source" : "my_var + doc['val1'].value", "params" : { "my_var" : 1 }, "lang" : "js" } } } } } }


@ -100,9 +100,7 @@ task createServiceAccountFile() {
integTestCluster {
dependsOn createServiceAccountFile, googleCloudStorageFixture
setupCommand 'create-elasticsearch-keystore', 'bin/elasticsearch-keystore', 'create'
setupCommand 'add-credentials-to-elasticsearch-keystore',
'bin/elasticsearch-keystore', 'add-file', 'gcs.client.integration_test.credentials_file', "${serviceAccountFile.absolutePath}"
keystoreFile 'gcs.client.integration_test.credentials_file', "${serviceAccountFile.absolutePath}"
/* Use a closure on the string to delay evaluation until tests are executed */
setting 'gcs.client.integration_test.endpoint', "http://${ -> googleCloudStorageFixture.addressAndPort }"


@ -4,7 +4,7 @@
"methods": ["POST", "PUT"],
"url": {
"path": "/{index}/{type}",
"paths": ["/{index}/{type}", "/{index}/{type}/{id}"],
"paths": ["/{index}/{type}", "/{index}/{type}/{id}", "/{index}/_doc/{id}", "/{index}/_doc"],
"parts": {
"id": {
"type" : "string",
@ -17,7 +17,6 @@
},
"type": {
"type" : "string",
"required" : true,
"description" : "The type of the document"
}
},


@ -18,7 +18,6 @@
- do:
search:
index: test_1
type: test
body:
query: { term: { _id: 1 }}
@ -36,7 +35,6 @@
- do:
search:
index: test_1
type: test
body:
query: { term: { _id: 2 }}
@ -56,7 +54,6 @@
- do:
search:
index: test_1
type: test
body:
query: { term: { _id: 1 }}
@ -76,7 +73,6 @@
- do:
search:
index: create_60_refresh_1
type: test
body:
query: { term: { _id: create_60_refresh_id1 }}
- match: { hits.total: 1 }


@ -37,7 +37,6 @@
- do:
search:
index: test_1
type: test
body:
query: { terms: { _id: [1,3] }}
@ -52,7 +51,6 @@
- do:
search:
index: test_1
type: test
body:
query: { terms: { _id: [1,3] }}
@ -72,7 +70,6 @@
- do:
search:
index: test_1
type: test
body:
query: { terms: { _id: [1,3] }}
@ -92,7 +89,6 @@
- do:
search:
index: test_1
type: test
body:
query: { term: { _id: 1 }}
- match: { hits.total: 1 }
@ -107,7 +103,6 @@
- do:
search:
index: test_1
type: test
body:
query: { term: { _id: 1 }}
- match: { hits.total: 0 }
@ -126,7 +121,6 @@
- do:
search:
index: delete_50_refresh_1
type: test
body:
query: { term: { _id: delete_50_refresh_id1 }}
- match: { hits.total: 1 }
@ -142,7 +136,6 @@
- do:
search:
index: delete_50_refresh_1
type: test
body:
query: { term: { _id: delete_50_refresh_id1 }}
- match: { hits.total: 0 }


@ -19,7 +19,6 @@
- do:
search:
index: test_1
type: test
body:
query: { term: { _id: 1 }}
@ -37,7 +36,6 @@
- do:
search:
index: test_1
type: test
body:
query: { term: { _id: 2 }}
@ -57,7 +55,6 @@
- do:
search:
index: test_1
type: test
body:
query: { term: { _id: 1 }}
@ -77,7 +74,6 @@
- do:
search:
index: index_60_refresh_1
type: test
body:
query: { term: { _id: index_60_refresh_id1 }}
- match: { hits.total: 1 }


@ -39,6 +39,45 @@
- match: { index.mappings.properties.foo.type: "keyword" }
- match: { index.mappings.properties.bar.type: "float" }
# Explicit id
- do:
index:
index: index
id: 1
body: { foo: bar }
# Implicit id
- do:
index:
index: index
body: { foo: bar }
# Bulk with explicit id
- do:
bulk:
index: index
body: |
{ "index": { "_id": "2" } }
{ "doc": { "foo": "baz" } }
# Bulk with implicit id
- do:
bulk:
index: index
body: |
{ "index": { } }
{ "doc": { "foo": "baz" } }
- do:
indices.refresh:
index: index
- do:
count:
index: index
- match: { count: 4 }
---
"PUT mapping with a type and include_type_name: false":


@ -70,7 +70,6 @@
- do:
search:
index: logs_search
type: test
- match: { hits.total: 1 }
- match: { hits.hits.0._index: "logs-000002"}


@ -93,7 +93,6 @@
- do:
search:
index: test
type: test
body:
sort: ["rank"]
size: 1
@ -105,7 +104,6 @@
- do:
search:
index: test
type: test
body:
sort: ["rank"]
query: {"range": { "rank": { "from": 0 } } }
@ -128,7 +126,6 @@
- do:
search:
index: test
type: test
body:
sort: _doc
@ -146,7 +143,6 @@
- do:
search:
index: test
type: test
body:
sort: ["rank"]
query: {"range": { "rank": { "from": 0 } } }
@ -163,7 +159,6 @@
catch: /disabling \[track_total_hits\] is not allowed in a scroll context/
search:
index: test
type: test
scroll: 1m
body:
sort: ["rank"]


@ -32,7 +32,6 @@
- do:
search:
index: test_1
type: test
body:
query:
more_like_this:


@ -37,7 +37,6 @@
- do:
search:
index: test_1
type: test
body:
query:
more_like_this:


@ -37,7 +37,6 @@
- do:
search:
index: test_1
type: test
body:
query:
more_like_this:


@ -67,14 +67,12 @@
- do:
search:
index: goodbad
type: doc
- match: {hits.total: 7}
- do:
search:
index: goodbad
type: doc
body: {"aggs": {"class": {"terms": {"field": "class"},"aggs": {"sig_terms": {"significant_terms": {"field": "text"}}}}}}
- match: {aggregations.class.buckets.0.sig_terms.buckets.0.key: "bad"}


@ -72,14 +72,12 @@
- do:
search:
index: goodbad
type: doc
- match: {hits.total: 7}
- do:
search:
index: goodbad
type: doc
body: {"aggs": {"class": {"terms": {"field": "class"},"aggs": {"sig_text": {"significant_text": {"field": "text"}}}}}}
- match: {aggregations.class.buckets.0.sig_text.buckets.0.key: "bad"}
@ -159,14 +157,12 @@
- do:
search:
index: goodbad
type: doc
- match: {hits.total: 7}
- do:
search:
index: goodbad
type: doc
body: {"aggs": {"class": {"terms": {"field": "class"},"aggs": {"sig_text": {"significant_text": {"field": "text", "filter_duplicate_text": true}}}}}}
- match: {aggregations.class.buckets.0.sig_text.buckets.0.key: "bad"}


@ -64,7 +64,6 @@ setup:
- do:
search:
index: test
type: test
body:
collapse: { field: numeric_group }
sort: [{ sort: desc }]
@ -100,7 +99,6 @@ setup:
- do:
search:
index: test
type: test
body:
from: 2
collapse: { field: numeric_group }
@ -125,7 +123,6 @@ setup:
- do:
search:
index: test
type: test
body:
collapse: { field: numeric_group, inner_hits: { name: sub_hits, size: 2, sort: [{ sort: asc }] } }
sort: [{ sort: desc }]
@ -169,7 +166,6 @@ setup:
- do:
search:
index: test
type: test
body:
collapse: { field: numeric_group, max_concurrent_group_searches: 10, inner_hits: { name: sub_hits, size: 2, sort: [{ sort: asc }] } }
sort: [{ sort: desc }]
@ -215,7 +211,6 @@ setup:
catch: /cannot use \`collapse\` in a scroll context/
search:
index: test
type: test
scroll: 1s
body:
collapse: { field: numeric_group }
@ -231,7 +226,6 @@ setup:
catch: /cannot use \`collapse\` in conjunction with \`search_after\`/
search:
index: test
type: test
body:
collapse: { field: numeric_group }
search_after: [6]
@ -248,7 +242,6 @@ setup:
catch: /cannot use \`collapse\` in conjunction with \`rescore\`/
search:
index: test
type: test
body:
collapse: { field: numeric_group }
rescore:
@ -269,7 +262,6 @@ setup:
- do:
search:
index: test
type: test
body:
size: 0
collapse: { field: numeric_group, inner_hits: { name: sub_hits, size: 1} }
@ -288,7 +280,6 @@ setup:
- do:
search:
index: test
type: test
body:
collapse:
field: numeric_group
@ -345,7 +336,6 @@ setup:
- do:
search:
index: test
type: test
body:
collapse: { field: numeric_group, inner_hits: { name: sub_hits, version: true, size: 2, sort: [{ sort: asc }] } }
sort: [{ sort: desc }]


@ -29,7 +29,6 @@ setup:
- do:
search:
index: _all
type: test
body:
query:
match:
@ -40,7 +39,6 @@ setup:
- do:
search:
index: test_1
type: test
body:
query:
match:
@ -54,7 +52,6 @@ setup:
- do:
search:
index: test_2
type: test
body:
query:
match:


@ -36,7 +36,6 @@ setup:
- do:
search:
index: test
type: test
body:
size: 1
query:
@ -54,7 +53,6 @@ setup:
- do:
search:
index: test
type: test
body:
size: 1
query:
@ -73,7 +71,6 @@ setup:
- do:
search:
index: test
type: test
body:
size: 1
query:
@ -92,7 +89,6 @@ setup:
- do:
search:
index: test
type: test
body:
size: 1
query:


@ -21,7 +21,6 @@
- do:
search:
index: test_1
type: test
body:
query: { term: { _id: 1 }}
@ -41,7 +40,6 @@
- do:
search:
index: test_1
type: test
body:
query: { term: { _id: 2 }}
@ -71,7 +69,6 @@
- do:
search:
index: test_1
type: test
body:
query: { term: { cat: dog }}
@ -91,7 +88,6 @@
- do:
search:
index: update_60_refresh_1
type: test
body:
query: { term: { _id: update_60_refresh_id1 }}
- match: { hits.total: 1 }
@ -109,7 +105,6 @@
- do:
search:
index: update_60_refresh_1
type: test
body:
query: { match: { test: asdf } }
- match: { hits.total: 1 }


@ -224,7 +224,9 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest
/**
* The document types to execute the search against. Defaults to be executed against
* all types.
* @deprecated Types are going away, prefer filtering on a field.
*/
@Deprecated
public SearchRequest types(String... types) {
Objects.requireNonNull(types, "types must not be null");
for (String type : types) {


@ -61,7 +61,9 @@ public class SearchRequestBuilder extends ActionRequestBuilder<SearchRequest, Se
/**
* The document types to execute the search against. Defaults to be executed against
* all types.
* @deprecated Types are going away, prefer filtering on a field.
*/
@Deprecated
public SearchRequestBuilder setTypes(String... types) {
request.types(types);
return this;


@ -477,4 +477,14 @@ public interface Client extends ElasticsearchClient, Releasable {
* issued from it.
*/
Client filterWithHeader(Map<String, String> headers);
/**
* Returns a client to a remote cluster with the given cluster alias.
*
* @throws IllegalArgumentException if the given clusterAlias doesn't exist
* @throws UnsupportedOperationException if this functionality is not available on this client.
*/
default Client getRemoteClusterClient(String clusterAlias) {
throw new UnsupportedOperationException("this client doesn't support remote cluster connections");
}
}
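
A rough usage sketch of the new API (the `leader` alias and `logs` index are hypothetical; the alias must already be registered in the remote cluster settings):

[source,java]
--------------------------------------------------
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;

public final class RemoteClientExample {
    /** Runs a match_all search against the remote cluster aliased "leader". */
    static SearchResponse searchLeader(Client client) {
        // Throws IllegalArgumentException if "leader" is not configured, or
        // UnsupportedOperationException if this client cannot reach remote clusters.
        Client remote = client.getRemoteClusterClient("leader");
        return remote.prepareSearch("logs")
                .setQuery(QueryBuilders.matchAllQuery())
                .get();
    }
}
--------------------------------------------------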


@ -73,4 +73,9 @@ public abstract class FilterClient extends AbstractClient {
protected Client in() {
return in;
}
@Override
public Client getRemoteClusterClient(String clusterAlias) {
return in.getRemoteClusterClient(clusterAlias);
}
}


@ -33,6 +33,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.tasks.TaskListener;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.RemoteClusterService;
import java.util.Map;
import java.util.function.Supplier;
@ -48,14 +49,17 @@ public class NodeClient extends AbstractClient {
* {@link #executeLocally(GenericAction, ActionRequest, TaskListener)}.
*/
private Supplier<String> localNodeId;
private RemoteClusterService remoteClusterService;
public NodeClient(Settings settings, ThreadPool threadPool) {
super(settings, threadPool);
}
public void initialize(Map<GenericAction, TransportAction> actions, Supplier<String> localNodeId) {
public void initialize(Map<GenericAction, TransportAction> actions, Supplier<String> localNodeId,
RemoteClusterService remoteClusterService) {
this.actions = actions;
this.localNodeId = localNodeId;
this.remoteClusterService = remoteClusterService;
}
@Override
@ -117,4 +121,9 @@ public class NodeClient extends AbstractClient {
}
return transportAction;
}
@Override
public Client getRemoteClusterClient(String clusterAlias) {
return remoteClusterService.getRemoteClusterClient(threadPool(), clusterAlias);
}
}


@ -245,6 +245,8 @@ public final class ClusterSettings extends AbstractScopedSettings {
HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH,
HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE,
HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE,
HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT,
HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE,
HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH,
HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT,
HttpTransportSettings.SETTING_HTTP_RESET_COOKIES,


@ -23,10 +23,16 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.http.HttpTransportSettings;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT;
import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE;
import java.io.Closeable;
import java.io.IOException;
@ -39,13 +45,14 @@ import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.FutureTask;
import java.util.concurrent.RunnableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import java.nio.charset.StandardCharsets;
/**
* A ThreadContext is a map of string headers and a transient map of keyed objects that are associated with
@ -81,6 +88,8 @@ public final class ThreadContext implements Closeable, Writeable {
private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct();
private final Map<String, String> defaultHeader;
private final ContextThreadLocal threadLocal;
private final int maxWarningHeaderCount;
private final long maxWarningHeaderSize;
/**
* Creates a new ThreadContext instance
@ -98,6 +107,8 @@ public final class ThreadContext implements Closeable, Writeable {
this.defaultHeader = Collections.unmodifiableMap(defaultHeader);
}
threadLocal = new ContextThreadLocal();
this.maxWarningHeaderCount = SETTING_HTTP_MAX_WARNING_HEADER_COUNT.get(settings);
this.maxWarningHeaderSize = SETTING_HTTP_MAX_WARNING_HEADER_SIZE.get(settings).getBytes();
}
@Override
@ -282,7 +293,7 @@ public final class ThreadContext implements Closeable, Writeable {
* @param uniqueValue the function that produces de-duplication values
*/
public void addResponseHeader(final String key, final String value, final Function<String, String> uniqueValue) {
threadLocal.set(threadLocal.get().putResponse(key, value, uniqueValue));
threadLocal.set(threadLocal.get().putResponse(key, value, uniqueValue, maxWarningHeaderCount, maxWarningHeaderSize));
}
/**
@ -359,7 +370,7 @@ public final class ThreadContext implements Closeable, Writeable {
private final Map<String, Object> transientHeaders;
private final Map<String, List<String>> responseHeaders;
private final boolean isSystemContext;
private long warningHeadersSize; // running total of warning header bytes, kept so we don't recalculate the size for every new warning header
private ThreadContextStruct(StreamInput in) throws IOException {
final int numRequest = in.readVInt();
Map<String, String> requestHeaders = numRequest == 0 ? Collections.emptyMap() : new HashMap<>(numRequest);
@ -371,6 +382,7 @@ public final class ThreadContext implements Closeable, Writeable {
this.responseHeaders = in.readMapOfLists(StreamInput::readString, StreamInput::readString);
this.transientHeaders = Collections.emptyMap();
isSystemContext = false; // we never serialize this it's a transient flag
this.warningHeadersSize = 0L;
}
private ThreadContextStruct setSystemContext() {
@ -387,6 +399,18 @@ public final class ThreadContext implements Closeable, Writeable {
this.responseHeaders = responseHeaders;
this.transientHeaders = transientHeaders;
this.isSystemContext = isSystemContext;
this.warningHeadersSize = 0L;
}
private ThreadContextStruct(Map<String, String> requestHeaders,
Map<String, List<String>> responseHeaders,
Map<String, Object> transientHeaders, boolean isSystemContext,
long warningHeadersSize) {
this.requestHeaders = requestHeaders;
this.responseHeaders = responseHeaders;
this.transientHeaders = transientHeaders;
this.isSystemContext = isSystemContext;
this.warningHeadersSize = warningHeadersSize;
}
/**
@ -440,30 +464,58 @@ public final class ThreadContext implements Closeable, Writeable {
return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext);
}
private ThreadContextStruct putResponse(final String key, final String value, final Function<String, String> uniqueValue) {
private ThreadContextStruct putResponse(final String key, final String value, final Function<String, String> uniqueValue,
final int maxWarningHeaderCount, final long maxWarningHeaderSize) {
assert value != null;
long newWarningHeaderSize = warningHeadersSize;
//check if we can add another warning header - if max size within limits
if (key.equals("Warning") && (maxWarningHeaderSize != -1)) { //if size is NOT unbounded, check its limits
if (warningHeadersSize > maxWarningHeaderSize) { // if max size has already been reached before
final String message = "Dropping a warning header, as their total size reached the maximum allowed of [" +
maxWarningHeaderSize + "] bytes set in [" +
HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE.getKey() + "]!";
ESLoggerFactory.getLogger(ThreadContext.class).warn(message);
return this;
}
newWarningHeaderSize += "Warning".getBytes(StandardCharsets.UTF_8).length + value.getBytes(StandardCharsets.UTF_8).length;
if (newWarningHeaderSize > maxWarningHeaderSize) {
final String message = "Dropping a warning header, as their total size reached the maximum allowed of [" +
maxWarningHeaderSize + "] bytes set in [" +
HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE.getKey() + "]!";
ESLoggerFactory.getLogger(ThreadContext.class).warn(message);
return new ThreadContextStruct(requestHeaders, responseHeaders, transientHeaders, isSystemContext, newWarningHeaderSize);
}
}
final Map<String, List<String>> newResponseHeaders = new HashMap<>(this.responseHeaders);
final List<String> existingValues = newResponseHeaders.get(key);
if (existingValues != null) {
final Set<String> existingUniqueValues = existingValues.stream().map(uniqueValue).collect(Collectors.toSet());
assert existingValues.size() == existingUniqueValues.size();
if (existingUniqueValues.contains(uniqueValue.apply(value))) {
return this;
}
final List<String> newValues = new ArrayList<>(existingValues);
newValues.add(value);
newResponseHeaders.put(key, Collections.unmodifiableList(newValues));
} else {
newResponseHeaders.put(key, Collections.singletonList(value));
}
return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext);
//check if we can add another warning header - if max count within limits
if ((key.equals("Warning")) && (maxWarningHeaderCount != -1)) { //if count is NOT unbounded, check its limits
final int warningHeaderCount = newResponseHeaders.containsKey("Warning") ? newResponseHeaders.get("Warning").size() : 0;
if (warningHeaderCount > maxWarningHeaderCount) {
final String message = "Dropping a warning header, as their total count reached the maximum allowed of [" +
maxWarningHeaderCount + "] set in [" + HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT.getKey() + "]!";
ESLoggerFactory.getLogger(ThreadContext.class).warn(message);
return this;
}
}
return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext, newWarningHeaderSize);
}
private ThreadContextStruct putTransient(String key, Object value) {
Map<String, Object> newTransient = new HashMap<>(this.transientHeaders);
if (newTransient.putIfAbsent(key, value) != null) {
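
A small sketch of the dropping behaviour through the public ThreadContext API (the values are illustrative):

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ThreadContext;

public final class WarningHeaderCapExample {
    public static void main(String[] args) {
        // Allow at most two "Warning" response headers; later ones are
        // silently dropped (with a log message) instead of failing.
        Settings settings = Settings.builder()
                .put("http.max_warning_header_count", 2)
                .build();
        ThreadContext context = new ThreadContext(settings);
        for (int i = 0; i < 5; i++) {
            context.addResponseHeader("Warning", "299 Elasticsearch \"warning " + i + "\"");
        }
        // Prints the number of headers that survived the cap.
        System.out.println(context.getResponseHeaders().get("Warning").size());
    }
}
--------------------------------------------------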


@ -61,6 +61,7 @@ public class XContentElasticsearchExtension implements XContentBuilderExtension
writers.put(FixedDateTimeZone.class, (b, v) -> b.value(Objects.toString(v)));
writers.put(MutableDateTime.class, XContentBuilder::timeValue);
writers.put(DateTime.class, XContentBuilder::timeValue);
writers.put(TimeValue.class, (b, v) -> b.value(v.toString()));
writers.put(BytesReference.class, (b, v) -> {
if (v == null) {


@ -29,7 +29,6 @@ import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import static java.util.Collections.emptyList;
@ -93,6 +92,10 @@ public final class HttpTransportSettings {
Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), Property.NodeScope);
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_HEADER_SIZE =
Setting.byteSizeSetting("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB), Property.NodeScope);
public static final Setting<Integer> SETTING_HTTP_MAX_WARNING_HEADER_COUNT =
Setting.intSetting("http.max_warning_header_count", -1, -1, Property.NodeScope);
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_WARNING_HEADER_SIZE =
Setting.byteSizeSetting("http.max_warning_header_size", new ByteSizeValue(-1), Property.NodeScope);
public static final Setting<ByteSizeValue> SETTING_HTTP_MAX_INITIAL_LINE_LENGTH =
Setting.byteSizeSetting("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB), Property.NodeScope);
// don't reset cookies by default, since I don't think we really need to


@ -185,7 +185,7 @@ public final class IndexSettings {
public static final Setting<ByteSizeValue> INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING =
Setting.byteSizeSetting("index.translog.flush_threshold_size", new ByteSizeValue(512, ByteSizeUnit.MB),
/*
* An empty translog occupies 43 bytes on disk. If the flush threshold is below this, the flush thread
* An empty translog occupies 55 bytes on disk. If the flush threshold is below this, the flush thread
* can get stuck in an infinite loop as the shouldPeriodicallyFlush can still be true after flushing.
* However, small thresholds are useful for testing so we do not add a large lower bound here.
*/
@ -220,7 +220,7 @@ public final class IndexSettings {
"index.translog.generation_threshold_size",
new ByteSizeValue(64, ByteSizeUnit.MB),
/*
* An empty translog occupies 43 bytes on disk. If the generation threshold is
* An empty translog occupies 55 bytes on disk. If the generation threshold is
* below this, the flush thread can get stuck in an infinite loop repeatedly
* rolling the generation as every new generation will already exceed the
* generation threshold. However, small thresholds are useful for testing so we


@ -1068,14 +1068,13 @@ public abstract class Engine implements Closeable {
this.autoGeneratedIdTimestamp = autoGeneratedIdTimestamp;
}
public Index(Term uid, ParsedDocument doc) {
this(uid, doc, Versions.MATCH_ANY);
public Index(Term uid, long primaryTerm, ParsedDocument doc) {
this(uid, primaryTerm, doc, Versions.MATCH_ANY);
} // TEST ONLY
Index(Term uid, ParsedDocument doc, long version) {
// use a primary term of 2 to allow tests to reduce it to a valid >0 term
this(uid, doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 2, version, VersionType.INTERNAL,
Origin.PRIMARY, System.nanoTime(), -1, false);
Index(Term uid, long primaryTerm, ParsedDocument doc, long version) {
this(uid, doc, SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, version, VersionType.INTERNAL,
Origin.PRIMARY, System.nanoTime(), -1, false);
} // TEST ONLY
public ParsedDocument parsedDoc() {
@ -1145,8 +1144,8 @@ public abstract class Engine implements Closeable {
this.id = Objects.requireNonNull(id);
}
public Delete(String type, String id, Term uid) {
this(type, id, uid, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime());
public Delete(String type, String id, Term uid, long primaryTerm) {
this(type, id, uid, SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime());
}
public Delete(Delete template, VersionType versionType) {


@ -79,6 +79,7 @@ public final class EngineConfig {
@Nullable
private final CircuitBreakerService circuitBreakerService;
private final LongSupplier globalCheckpointSupplier;
private final LongSupplier primaryTermSupplier;
/**
* Index setting to change the low level lucene codec used for writing new segments.
@ -125,7 +126,7 @@ public final class EngineConfig {
List<ReferenceManager.RefreshListener> externalRefreshListener,
List<ReferenceManager.RefreshListener> internalRefreshListener, Sort indexSort,
TranslogRecoveryRunner translogRecoveryRunner, CircuitBreakerService circuitBreakerService,
LongSupplier globalCheckpointSupplier) {
LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier) {
this.shardId = shardId;
this.allocationId = allocationId;
this.indexSettings = indexSettings;
@ -152,6 +153,7 @@ public final class EngineConfig {
this.translogRecoveryRunner = translogRecoveryRunner;
this.circuitBreakerService = circuitBreakerService;
this.globalCheckpointSupplier = globalCheckpointSupplier;
this.primaryTermSupplier = primaryTermSupplier;
}
/**
@ -354,4 +356,11 @@ public final class EngineConfig {
public CircuitBreakerService getCircuitBreakerService() {
return this.circuitBreakerService;
}
/**
* Returns a supplier that supplies the latest primary term value of the associated shard.
*/
public LongSupplier getPrimaryTermSupplier() {
return primaryTermSupplier;
}
}


@ -422,7 +422,7 @@ public class InternalEngine extends Engine {
final TranslogConfig translogConfig = engineConfig.getTranslogConfig();
final String translogUUID = loadTranslogUUIDFromLastCommit();
// We expect that this shard already exists, so it must already have an existing translog else something is badly wrong!
return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier);
return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier, engineConfig.getPrimaryTermSupplier());
}
@Override


@ -571,7 +571,9 @@ public final class QueryBuilders {
/**
* A filter based on doc/mapping type.
* @deprecated Types are going away, prefer filtering on a field.
*/
@Deprecated
public static TypeQueryBuilder typeQuery(String type) {
return new TypeQueryBuilder(type);
}
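
A sketch of the suggested replacement, filtering on an ordinary field instead (the `doc_type` field name is illustrative):

[source,java]
--------------------------------------------------
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

public final class TypeFilterReplacement {
    /** Instead of typeQuery("my_type"), index a discriminator field and filter on it. */
    static QueryBuilder byDocType() {
        return QueryBuilders.termQuery("doc_type", "my_type");
    }
}
--------------------------------------------------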


@ -24,9 +24,11 @@ import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentLocation;
import org.elasticsearch.common.xcontent.XContentParser;
import java.io.IOException;
@ -203,18 +205,54 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
if (clauses.size() == 1) {
Query query = clauses.get(0).toQuery(context);
SpanQueryBuilder queryBuilder = clauses.get(0);
boolean isGap = queryBuilder instanceof SpanGapQueryBuilder;
Query query = null;
if (!isGap) {
query = queryBuilder.toQuery(context);
assert query instanceof SpanQuery;
}
if (clauses.size() == 1) {
assert !isGap;
return query;
}
SpanQuery[] spanQueries = new SpanQuery[clauses.size()];
for (int i = 0; i < clauses.size(); i++) {
Query query = clauses.get(i).toQuery(context);
assert query instanceof SpanQuery;
spanQueries[i] = (SpanQuery) query;
String spanNearFieldName = null;
if (isGap) {
spanNearFieldName = ((SpanGapQueryBuilder) queryBuilder).fieldName();
} else {
spanNearFieldName = ((SpanQuery) query).getField();
}
return new SpanNearQuery(spanQueries, slop, inOrder);
SpanNearQuery.Builder builder = new SpanNearQuery.Builder(spanNearFieldName, inOrder);
builder.setSlop(slop);
/*
* Lucene SpanNearQuery throws exceptions for certain use cases, like adding a gap to an
* unordered SpanNearQuery. Should ES have the same checks or wrap those thrown exceptions?
*/
if (isGap) {
int gap = ((SpanGapQueryBuilder) queryBuilder).width();
builder.addGap(gap);
} else {
builder.addClause((SpanQuery) query);
}
for (int i = 1; i < clauses.size(); i++) {
queryBuilder = clauses.get(i);
isGap = queryBuilder instanceof SpanGapQueryBuilder;
if (isGap) {
String fieldName = ((SpanGapQueryBuilder) queryBuilder).fieldName();
if (!spanNearFieldName.equals(fieldName)) {
throw new IllegalArgumentException("[span_near] clauses must have same field");
}
int gap = ((SpanGapQueryBuilder) queryBuilder).width();
builder.addGap(gap);
} else {
query = clauses.get(i).toQuery(context);
assert query instanceof SpanQuery;
builder.addClause((SpanQuery)query);
}
}
return builder.build();
}
@Override
@ -233,4 +271,168 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder<SpanNearQueryBuil
public String getWriteableName() {
return NAME;
}
/**
* SpanGapQueryBuilder enables gaps in a SpanNearQuery.
* Since SpanGapQuery is private to SpanNearQuery, SpanGapQueryBuilder cannot
* be used to generate a Query (SpanGapQuery) like other QueryBuilders.
* Instead, it just identifies a span_gap clause so that SpanNearQuery.addGap(int)
* can be invoked for it.
* This QueryBuilder is only applicable as a clause in a SpanNearQueryBuilder,
* but that restriction is not yet enforced.
*/
public static class SpanGapQueryBuilder implements SpanQueryBuilder {
public static final String NAME = "span_gap";
/** Name of field to match against. */
private final String fieldName;
/** Width of the gap introduced. */
private final int width;
/**
* Constructs a new SpanGapQueryBuilder term query.
*
* @param fieldName The name of the field
* @param width The width of the gap introduced
*/
public SpanGapQueryBuilder(String fieldName, int width) {
if (Strings.isEmpty(fieldName)) {
throw new IllegalArgumentException("[span_gap] field name is null or empty");
}
// Lucene does not place any restriction on the value of width.
// TODO: determine whether it theoretically makes sense to apply restrictions.
this.fieldName = fieldName;
this.width = width;
}
/**
* Read from a stream.
*/
public SpanGapQueryBuilder(StreamInput in) throws IOException {
fieldName = in.readString();
width = in.readInt();
}
/**
* @return fieldName The name of the field
*/
public String fieldName() {
return fieldName;
}
/**
* @return width The width of the gap introduced
*/
public int width() {
return width;
}
@Override
public Query toQuery(QueryShardContext context) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public Query toFilter(QueryShardContext context) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public String queryName() {
throw new UnsupportedOperationException();
}
@Override
public QueryBuilder queryName(String queryName) {
throw new UnsupportedOperationException();
}
@Override
public float boost() {
throw new UnsupportedOperationException();
}
@Override
public QueryBuilder boost(float boost) {
throw new UnsupportedOperationException();
}
@Override
public String getName() {
return NAME;
}
@Override
public String getWriteableName() {
return NAME;
}
@Override
public final void writeTo(StreamOutput out) throws IOException {
out.writeString(fieldName);
out.writeInt(width);
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.startObject(getName());
builder.field(fieldName, width);
builder.endObject();
builder.endObject();
return builder;
}
public static SpanGapQueryBuilder fromXContent(XContentParser parser) throws IOException {
String fieldName = null;
int width = 0;
String currentFieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
throwParsingExceptionOnMultipleFields(NAME, parser.getTokenLocation(), fieldName, currentFieldName);
fieldName = currentFieldName;
} else if (token.isValue()) {
width = parser.intValue();
}
}
SpanGapQueryBuilder result = new SpanGapQueryBuilder(fieldName, width);
return result;
}
@Override
public final boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
SpanGapQueryBuilder other = (SpanGapQueryBuilder) obj;
return Objects.equals(fieldName, other.fieldName) &&
Objects.equals(width, other.width);
}
@Override
public final int hashCode() {
return Objects.hash(getClass(), fieldName, width);
}
@Override
public final String toString() {
return Strings.toString(this, true, true);
}
//copied from AbstractQueryBuilder
protected static void throwParsingExceptionOnMultipleFields(String queryName, XContentLocation contentLocation,
String processedFieldName, String currentFieldName) {
if (processedFieldName != null) {
throw new ParsingException(contentLocation, "[" + queryName + "] query doesn't support multiple fields, found ["
+ processedFieldName + "] and [" + currentFieldName + "]");
}
}
}
}
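
A sketch of how a gap is used when building the query programmatically (the field and terms are illustrative):

[source,java]
--------------------------------------------------
import org.elasticsearch.index.query.SpanNearQueryBuilder;
import org.elasticsearch.index.query.SpanNearQueryBuilder.SpanGapQueryBuilder;
import org.elasticsearch.index.query.SpanTermQueryBuilder;

public final class SpanGapExample {
    /** Matches "quick" and "fox" on field "body" with a two-position gap between them. */
    static SpanNearQueryBuilder quickGapFox() {
        return new SpanNearQueryBuilder(new SpanTermQueryBuilder("body", "quick"), 0)
                .addClause(new SpanGapQueryBuilder("body", 2))
                .addClause(new SpanTermQueryBuilder("body", "fox"))
                .inOrder(true);
    }
}
--------------------------------------------------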


@ -27,6 +27,8 @@ import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
@ -39,6 +41,7 @@ public class TypeQueryBuilder extends AbstractQueryBuilder<TypeQueryBuilder> {
public static final String NAME = "type";
private static final ParseField VALUE_FIELD = new ParseField("value");
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(TypeQueryBuilder.class));
private final String type;
@ -125,6 +128,7 @@ public class TypeQueryBuilder extends AbstractQueryBuilder<TypeQueryBuilder> {
@Override
protected Query doToQuery(QueryShardContext context) throws IOException {
DEPRECATION_LOGGER.deprecated("The [type] query is deprecated, filter on a field instead.");
//LUCENE 4 UPGRADE document mapper should use bytesref as well?
DocumentMapper documentMapper = context.getMapperService().documentMapper(type);
if (documentMapper == null) {


@ -2135,7 +2135,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()),
Collections.singletonList(refreshListeners),
Collections.singletonList(new RefreshMetricUpdater(refreshMetric)),
indexSort, this::runTranslogRecovery, circuitBreakerService, replicationTracker);
indexSort, this::runTranslogRecovery, circuitBreakerService, replicationTracker, this::getPrimaryTerm);
}
/**


@ -393,7 +393,8 @@ final class StoreRecovery {
store.bootstrapNewHistory();
final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo();
final long maxSeqNo = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.MAX_SEQ_NO));
final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId);
final String translogUUID = Translog.createEmptyTranslog(
indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId, indexShard.getPrimaryTerm());
store.associateIndexWithNewTranslog(translogUUID);
} else if (indexShouldExists) {
// since we recover from local, just fill the files and size
@ -407,8 +408,8 @@ final class StoreRecovery {
}
} else {
store.createEmpty();
final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(),
SequenceNumbers.NO_OPS_PERFORMED, shardId);
final String translogUUID = Translog.createEmptyTranslog(
indexShard.shardPath().resolveTranslog(), SequenceNumbers.NO_OPS_PERFORMED, shardId, indexShard.getPrimaryTerm());
store.associateIndexWithNewTranslog(translogUUID);
}
indexShard.openEngineAndRecoverFromTranslog();
@ -456,7 +457,8 @@ final class StoreRecovery {
store.bootstrapNewHistory();
final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo();
final long maxSeqNo = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.MAX_SEQ_NO));
final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId);
final String translogUUID = Translog.createEmptyTranslog(
indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId, indexShard.getPrimaryTerm());
store.associateIndexWithNewTranslog(translogUUID);
assert indexShard.shardRouting.primary() : "only primary shards can recover from store";
indexShard.openEngineAndRecoverFromTranslog();


@ -35,15 +35,15 @@ public abstract class BaseTranslogReader implements Comparable<BaseTranslogReade
protected final long generation;
protected final FileChannel channel;
protected final Path path;
protected final long firstOperationOffset;
protected final TranslogHeader header;
public BaseTranslogReader(long generation, FileChannel channel, Path path, long firstOperationOffset) {
public BaseTranslogReader(long generation, FileChannel channel, Path path, TranslogHeader header) {
assert Translog.parseIdFromFileName(path) == generation : "generation mismatch. Path: " + Translog.parseIdFromFileName(path) + " but generation: " + generation;
this.generation = generation;
this.path = path;
this.channel = channel;
this.firstOperationOffset = firstOperationOffset;
this.header = header;
}
public long getGeneration() {
@ -57,7 +57,14 @@ public abstract class BaseTranslogReader implements Comparable<BaseTranslogReade
abstract Checkpoint getCheckpoint();
public final long getFirstOperationOffset() {
return firstOperationOffset;
return header.sizeInBytes();
}
/**
* Returns the primary term associated with this translog reader.
*/
public final long getPrimaryTerm() {
return header.getPrimaryTerm();
}
/** read the size of the op (i.e., number of bytes, including the op size) written at the given position */
@ -100,7 +107,12 @@ public abstract class BaseTranslogReader implements Comparable<BaseTranslogReade
}
protected Translog.Operation read(BufferedChecksumStreamInput inStream) throws IOException {
return Translog.readOperation(inStream);
final Translog.Operation op = Translog.readOperation(inStream);
if (op.primaryTerm() > getPrimaryTerm() && getPrimaryTerm() != TranslogHeader.UNKNOWN_PRIMARY_TERM) {
throw new TranslogCorruptedException("Operation's term is newer than translog header term; " +
"operation term[" + op.primaryTerm() + "], translog header term [" + getPrimaryTerm() + "]");
}
return op;
}
/**


@ -107,7 +107,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
public static final String CHECKPOINT_FILE_NAME = "translog" + CHECKPOINT_SUFFIX;
static final Pattern PARSE_STRICT_ID_PATTERN = Pattern.compile("^" + TRANSLOG_FILE_PREFIX + "(\\d+)(\\.tlog)$");
public static final int DEFAULT_HEADER_SIZE_IN_BYTES = TranslogWriter.getHeaderLength(UUIDs.randomBase64UUID());
public static final int DEFAULT_HEADER_SIZE_IN_BYTES = TranslogHeader.headerSizeInBytes(UUIDs.randomBase64UUID());
// the list of translog readers is guaranteed to be in order of translog generation
private final List<TranslogReader> readers = new ArrayList<>();
@ -120,6 +120,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
private final AtomicBoolean closed = new AtomicBoolean();
private final TranslogConfig config;
private final LongSupplier globalCheckpointSupplier;
private final LongSupplier primaryTermSupplier;
private final String translogUUID;
private final TranslogDeletionPolicy deletionPolicy;
@ -131,17 +132,22 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
* translog file referenced by this generation. The translog creation will fail if this generation can't be opened.
*
* @param config the configuration of this translog
* @param translogUUID the translog uuid to open, null for a new translog
* @param translogUUID the translog uuid to open, null for a new translog
* @param deletionPolicy an instance of {@link TranslogDeletionPolicy} that controls when a translog file can be safely
* deleted
* @param globalCheckpointSupplier a supplier for the global checkpoint
* @param primaryTermSupplier a supplier for the latest value of primary term of the owning index shard. The latest term value is
* examined and stored in the header whenever a new generation is rolled. It's guaranteed from outside
* that a new generation is rolled when the term is increased. This guarantee allows us to validate
* and reject operations whose term is higher than the primary term stored in the translog header.
*/
public Translog(
final TranslogConfig config, final String translogUUID, TranslogDeletionPolicy deletionPolicy,
final LongSupplier globalCheckpointSupplier) throws IOException {
final LongSupplier globalCheckpointSupplier, final LongSupplier primaryTermSupplier) throws IOException {
super(config.getShardId(), config.getIndexSettings());
this.config = config;
this.globalCheckpointSupplier = globalCheckpointSupplier;
this.primaryTermSupplier = primaryTermSupplier;
this.deletionPolicy = deletionPolicy;
this.translogUUID = translogUUID;
bigArrays = config.getBigArrays();
@ -163,7 +169,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
//
// For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that file exists
// if not we don't even try to clean it up and wait until we fail creating it
assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogWriter.getHeaderLength(translogUUID) : "unexpected translog file: [" + nextTranslogFile + "]";
assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogHeader.headerSizeInBytes(translogUUID) : "unexpected translog file: [" + nextTranslogFile + "]";
if (Files.exists(currentCheckpointFile) // current checkpoint is already copied
&& Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning
logger.warn("deleted previously created, but not yet committed, next generation [{}]. This can happen due to a tragic exception when creating a new generation", nextTranslogFile.getFileName());
@ -224,6 +230,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
minGenerationToRecoverFrom + " checkpoint: " + checkpoint.generation + " - translog ids must be consecutive");
}
final TranslogReader reader = openReader(committedTranslogFile, Checkpoint.read(location.resolve(getCommitCheckpointFileName(i))));
assert reader.getPrimaryTerm() <= primaryTermSupplier.getAsLong() :
"Primary terms go backwards; current term [" + primaryTermSupplier.getAsLong() + "]" +
"translog path [ " + committedTranslogFile + ", existing term [" + reader.getPrimaryTerm() + "]";
foundTranslogs.add(reader);
logger.debug("recovered local translog from checkpoint {}", checkpoint);
}
@ -267,10 +276,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
}
TranslogReader openReader(Path path, Checkpoint checkpoint) throws IOException {
return openReader(path, checkpoint, translogUUID);
}
private static TranslogReader openReader(Path path, Checkpoint checkpoint, String translogUUID) throws IOException {
FileChannel channel = FileChannel.open(path, StandardOpenOption.READ);
try {
assert Translog.parseIdFromFileName(path) == checkpoint.generation : "expected generation: " + Translog.parseIdFromFileName(path) + " but got: " + checkpoint.generation;
@ -459,7 +464,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
getChannelFactory(),
config.getBufferSize(),
initialMinTranslogGen, initialGlobalCheckpoint,
globalCheckpointSupplier, this::getMinFileGeneration);
globalCheckpointSupplier, this::getMinFileGeneration, primaryTermSupplier.getAsLong());
} catch (final IOException e) {
throw new TranslogException(shardId, "failed to create new translog file", e);
}
@ -487,6 +492,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
final ReleasablePagedBytesReference bytes = out.bytes();
try (ReleasableLock ignored = readLock.acquire()) {
ensureOpen();
if (operation.primaryTerm() > current.getPrimaryTerm()) {
throw new IllegalArgumentException("Operation term is newer than the current term; "
+ "current term [" + current.getPrimaryTerm() + "], operation term [" + operation.primaryTerm() + "]");
}
return current.add(bytes, operation.seqNo());
}
} catch (final AlreadyClosedException | IOException ex) {
@ -1005,17 +1014,17 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
this.autoGeneratedIdTimestamp = index.getAutoGeneratedIdTimestamp();
}
public Index(String type, String id, long seqNo, byte[] source) {
this(type, id, seqNo, Versions.MATCH_ANY, VersionType.INTERNAL, source, null, -1);
public Index(String type, String id, long seqNo, long primaryTerm, byte[] source) {
this(type, id, seqNo, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL, source, null, -1);
}
public Index(String type, String id, long seqNo, long version, VersionType versionType, byte[] source, String routing,
long autoGeneratedIdTimestamp) {
public Index(String type, String id, long seqNo, long primaryTerm, long version, VersionType versionType,
byte[] source, String routing, long autoGeneratedIdTimestamp) {
this.type = type;
this.id = id;
this.source = new BytesArray(source);
this.seqNo = seqNo;
this.primaryTerm = 0;
this.primaryTerm = primaryTerm;
this.version = version;
this.versionType = versionType;
this.routing = routing;
@ -1173,8 +1182,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
}
/** utility for testing */
public Delete(String type, String id, long seqNo, Term uid) {
this(type, id, uid, seqNo, 0, Versions.MATCH_ANY, VersionType.INTERNAL);
public Delete(String type, String id, long seqNo, long primaryTerm, Term uid) {
this(type, id, uid, seqNo, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL);
}
public Delete(String type, String id, Term uid, long seqNo, long primaryTerm, long version, VersionType versionType) {
@ -1378,10 +1387,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
}
private static void verifyChecksum(BufferedChecksumStreamInput in) throws IOException {
static void verifyChecksum(BufferedChecksumStreamInput in) throws IOException {
// This absolutely must come first, or else reading the checksum becomes part of the checksum
long expectedChecksum = in.getChecksum();
long readChecksum = in.readInt() & 0xFFFF_FFFFL;
long readChecksum = Integer.toUnsignedLong(in.readInt());
if (readChecksum != expectedChecksum) {
throw new TranslogCorruptedException("translog stream is corrupted, expected: 0x" +
Long.toHexString(expectedChecksum) + ", got: 0x" + Long.toHexString(readChecksum));
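The verifyChecksum change above swaps a manual mask for the equivalent JDK helper; both widen the stored 32-bit checksum to an unsigned long. A quick illustration with a made-up checksum value:
int stored = 0xCAFEBABE;                       // raw 32-bit checksum as read from the stream
long masked = stored & 0xFFFF_FFFFL;           // old form
long widened = Integer.toUnsignedLong(stored); // new form
assert masked == widened;                      // identical for every int value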
@ -1665,10 +1674,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
private static Checkpoint readCheckpoint(Path location, String expectedTranslogUUID) throws IOException {
final Checkpoint checkpoint = readCheckpoint(location);
// We need to open at least translog reader to validate the translogUUID.
// We need to open at least one translog header to validate the translogUUID.
final Path translogFile = location.resolve(getFilename(checkpoint.generation));
try (TranslogReader reader = openReader(translogFile, checkpoint, expectedTranslogUUID)) {
try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) {
TranslogHeader.read(expectedTranslogUUID, translogFile, channel);
} catch (TranslogCorruptedException ex) {
throw ex; // just bubble up.
} catch (Exception ex) {
@ -1707,13 +1716,14 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
return readers;
}
public static String createEmptyTranslog(final Path location, final long initialGlobalCheckpoint, final ShardId shardId)
throws IOException {
public static String createEmptyTranslog(final Path location, final long initialGlobalCheckpoint,
final ShardId shardId, final long primaryTerm) throws IOException {
final ChannelFactory channelFactory = FileChannel::open;
return createEmptyTranslog(location, initialGlobalCheckpoint, shardId, channelFactory);
return createEmptyTranslog(location, initialGlobalCheckpoint, shardId, channelFactory, primaryTerm);
}
static String createEmptyTranslog(Path location, long initialGlobalCheckpoint, ShardId shardId, ChannelFactory channelFactory) throws IOException {
static String createEmptyTranslog(Path location, long initialGlobalCheckpoint, ShardId shardId,
ChannelFactory channelFactory, long primaryTerm) throws IOException {
IOUtils.rm(location);
Files.createDirectories(location);
final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, 1, initialGlobalCheckpoint, 1);
@ -1723,7 +1733,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
final String translogUUID = UUIDs.randomBase64UUID();
TranslogWriter writer = TranslogWriter.create(shardId, translogUUID, 1, location.resolve(getFilename(1)), channelFactory,
new ByteSizeValue(10), 1, initialGlobalCheckpoint,
() -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); }
() -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); }, primaryTerm
);
writer.close();
return translogUUID;

View File

@ -0,0 +1,195 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.translog;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.apache.lucene.store.InputStreamDataInput;
import org.apache.lucene.store.OutputStreamDataOutput;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.io.Channels;
import org.elasticsearch.common.io.stream.InputStreamStreamInput;
import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
/**
* Each translog file is started with a translog header then followed by translog operations.
*/
final class TranslogHeader {
public static final String TRANSLOG_CODEC = "translog";
public static final int VERSION_CHECKSUMS = 1; // pre-2.0 - unsupported
public static final int VERSION_CHECKPOINTS = 2; // added checkpoints
public static final int VERSION_PRIMARY_TERM = 3; // added primary term
public static final int CURRENT_VERSION = VERSION_PRIMARY_TERM;
public static final long UNKNOWN_PRIMARY_TERM = 0L;
private final String translogUUID;
private final long primaryTerm;
private final int headerSizeInBytes;
/**
* Creates a new translog header with the given uuid and primary term.
*
* @param translogUUID this UUID is used to prevent accidental recovery from a transaction log that belongs to a
* different engine
* @param primaryTerm the primary term of the owning index shard when creating (e.g., rolling) this translog file.
* All operations' terms in this translog file are enforced to be at most this term.
*/
TranslogHeader(String translogUUID, long primaryTerm) {
this(translogUUID, primaryTerm, headerSizeInBytes(translogUUID));
assert primaryTerm >= 0 : "Primary term must be non-negative; term [" + primaryTerm + "]";
}
private TranslogHeader(String translogUUID, long primaryTerm, int headerSizeInBytes) {
this.translogUUID = translogUUID;
this.primaryTerm = primaryTerm;
this.headerSizeInBytes = headerSizeInBytes;
}
public String getTranslogUUID() {
return translogUUID;
}
/**
* Returns the primary term stored in this translog header.
* All operations in a translog file are expected to have their primary terms at most this term.
*/
public long getPrimaryTerm() {
return primaryTerm;
}
/**
* Returns the header size in bytes. This value can be used as the offset of the first translog operation.
* See {@link BaseTranslogReader#getFirstOperationOffset()}
*/
public int sizeInBytes() {
return headerSizeInBytes;
}
static int headerSizeInBytes(String translogUUID) {
return headerSizeInBytes(CURRENT_VERSION, new BytesRef(translogUUID).length);
}
private static int headerSizeInBytes(int version, int uuidLength) {
int size = CodecUtil.headerLength(TRANSLOG_CODEC);
size += Integer.BYTES + uuidLength; // uuid
if (version >= VERSION_PRIMARY_TERM) {
size += Long.BYTES; // primary term
size += Integer.BYTES; // checksum
}
return size;
}
/**
* Read a translog header from the given path and file channel
*/
static TranslogHeader read(final String translogUUID, final Path path, final FileChannel channel) throws IOException {
// This input is intentionally not closed because closing it will close the FileChannel.
final BufferedChecksumStreamInput in =
new BufferedChecksumStreamInput(new InputStreamStreamInput(java.nio.channels.Channels.newInputStream(channel), channel.size()));
final int version;
try {
version = CodecUtil.checkHeader(new InputStreamDataInput(in), TRANSLOG_CODEC, VERSION_CHECKSUMS, VERSION_PRIMARY_TERM);
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) {
tryReportOldVersionError(path, channel);
throw new TranslogCorruptedException("Translog header corrupted. path:" + path, e);
}
if (version == VERSION_CHECKSUMS) {
throw new IllegalStateException("pre-2.0 translog found [" + path + "]");
}
// Read the translogUUID
final int uuidLen = in.readInt();
if (uuidLen > channel.size()) {
throw new TranslogCorruptedException("uuid length can't be larger than the translog");
}
final BytesRef uuid = new BytesRef(uuidLen);
uuid.length = uuidLen;
in.read(uuid.bytes, uuid.offset, uuid.length);
final BytesRef expectedUUID = new BytesRef(translogUUID);
if (uuid.bytesEquals(expectedUUID) == false) {
throw new TranslogCorruptedException("expected shard UUID " + expectedUUID + " but got: " + uuid +
" this translog file belongs to a different translog. path:" + path);
}
// Read the primary term
final long primaryTerm;
if (version == VERSION_PRIMARY_TERM) {
primaryTerm = in.readLong();
assert primaryTerm >= 0 : "Primary term must be non-negative [" + primaryTerm + "]; translog path [" + path + "]";
} else {
assert version == VERSION_CHECKPOINTS : "Unknown header version [" + version + "]";
primaryTerm = UNKNOWN_PRIMARY_TERM;
}
// Verify the checksum
if (version >= VERSION_PRIMARY_TERM) {
Translog.verifyChecksum(in);
}
final int headerSizeInBytes = headerSizeInBytes(version, uuid.length);
assert channel.position() == headerSizeInBytes :
"Header is not fully read; header size [" + headerSizeInBytes + "], position [" + channel.position() + "]";
return new TranslogHeader(translogUUID, primaryTerm, headerSizeInBytes);
}
private static void tryReportOldVersionError(final Path path, final FileChannel channel) throws IOException {
// Lucene's CodecUtil writes a magic number of 0x3FD76C17 with the header, in binary this looks like:
// binary: 0011 1111 1101 0111 0110 1100 0001 0111
// hex : 3 f d 7 6 c 1 7
//
// With version 0 of the translog, the first byte is the Operation.Type, which will always be between 0-4,
// so we know if we grab the first byte, it can be:
// 0x3f => Lucene's magic number, so we can assume it's version 1 or later
// 0x00 => version 0 of the translog
final byte b1 = Channels.readFromFileChannel(channel, 0, 1)[0];
if (b1 == 0x3f) { // LUCENE_CODEC_HEADER_BYTE
throw new TranslogCorruptedException("translog looks like version 1 or later, but has corrupted header. path:" + path);
} else if (b1 == 0x00) { // UNVERSIONED_TRANSLOG_HEADER_BYTE
throw new IllegalStateException("pre-1.4 translog found [" + path + "]");
}
}
/**
* Writes this header with the latest format into the file channel
*/
void write(final FileChannel channel) throws IOException {
// This output is intentionally not closed because closing it will close the FileChannel.
@SuppressWarnings({"IOResourceOpenedButNotSafelyClosed", "resource"})
final BufferedChecksumStreamOutput out = new BufferedChecksumStreamOutput(
new OutputStreamStreamOutput(java.nio.channels.Channels.newOutputStream(channel)));
CodecUtil.writeHeader(new OutputStreamDataOutput(out), TRANSLOG_CODEC, CURRENT_VERSION);
// Write uuid
final BytesRef uuid = new BytesRef(translogUUID);
out.writeInt(uuid.length);
out.writeBytes(uuid.bytes, uuid.offset, uuid.length);
// Write primary term
out.writeLong(primaryTerm);
// Checksum header
out.writeInt((int) out.getChecksum());
out.flush();
channel.force(true);
assert channel.position() == headerSizeInBytes :
"Header is not fully written; header size [" + headerSizeInBytes + "], channel position [" + channel.position() + "]";
}
}
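As a worked example of headerSizeInBytes for the current (v3) format, assuming a 22-character UUID (the length of a base64-encoded 128-bit UUID) and Lucene's CodecUtil header of 9 + codec.length() bytes:
int codecHeader = 9 + "translog".length();   // magic + codec string + version = 17
int uuidLen = 22;                            // assumed UUID length
int size = codecHeader
    + Integer.BYTES + uuidLen                // uuid length prefix + uuid bytes = 26
    + Long.BYTES                             // primary term = 8
    + Integer.BYTES;                         // trailing header checksum = 4
// size == 55, which is also the offset of the first operation in the file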

View File

@ -19,15 +19,8 @@
package org.elasticsearch.index.translog;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexFormatTooNewException;
import org.apache.lucene.index.IndexFormatTooOldException;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.InputStreamDataInput;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.io.Channels;
import org.elasticsearch.common.io.stream.InputStreamStreamInput;
import java.io.Closeable;
import java.io.EOFException;
@ -41,10 +34,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
* an immutable translog file reader
*/
public class TranslogReader extends BaseTranslogReader implements Closeable {
private static final byte LUCENE_CODEC_HEADER_BYTE = 0x3f;
private static final byte UNVERSIONED_TRANSLOG_HEADER_BYTE = 0x00;
protected final long length;
private final int totalOperations;
private final Checkpoint checkpoint;
@ -53,13 +42,13 @@ public class TranslogReader extends BaseTranslogReader implements Closeable {
/**
* Create a translog reader against the specified translog file channel.
*
* @param checkpoint the translog checkpoint
* @param channel the translog file channel to open a translog reader against
* @param path the path to the translog
* @param firstOperationOffset the offset to the first operation
* @param checkpoint the translog checkpoint
* @param channel the translog file channel to open a translog reader against
* @param path the path to the translog
* @param header the header of the translog file
*/
TranslogReader(final Checkpoint checkpoint, final FileChannel channel, final Path path, final long firstOperationOffset) {
super(checkpoint.generation, channel, path, firstOperationOffset);
TranslogReader(final Checkpoint checkpoint, final FileChannel channel, final Path path, final TranslogHeader header) {
super(checkpoint.generation, channel, path, header);
this.length = checkpoint.offset;
this.totalOperations = checkpoint.numOps;
this.checkpoint = checkpoint;
@ -77,75 +66,8 @@ public class TranslogReader extends BaseTranslogReader implements Closeable {
*/
public static TranslogReader open(
final FileChannel channel, final Path path, final Checkpoint checkpoint, final String translogUUID) throws IOException {
try {
InputStreamStreamInput headerStream = new InputStreamStreamInput(java.nio.channels.Channels.newInputStream(channel),
channel.size()); // don't close
// Lucene's CodecUtil writes a magic number of 0x3FD76C17 with the
// header, in binary this looks like:
//
// binary: 0011 1111 1101 0111 0110 1100 0001 0111
// hex : 3 f d 7 6 c 1 7
//
// With version 0 of the translog, the first byte is the
// Operation.Type, which will always be between 0-4, so we know if
// we grab the first byte, it can be:
// 0x3f => Lucene's magic number, so we can assume it's version 1 or later
// 0x00 => version 0 of the translog
//
// otherwise the first byte of the translog is corrupted and we
// should bail
byte b1 = headerStream.readByte();
if (b1 == LUCENE_CODEC_HEADER_BYTE) {
// Read 3 more bytes, meaning a whole integer has been read
byte b2 = headerStream.readByte();
byte b3 = headerStream.readByte();
byte b4 = headerStream.readByte();
// Convert the 4 bytes that were read into an integer
int header = ((b1 & 0xFF) << 24) + ((b2 & 0xFF) << 16) + ((b3 & 0xFF) << 8) + ((b4 & 0xFF) << 0);
// We confirm CodecUtil's CODEC_MAGIC number (0x3FD76C17)
// ourselves here, because it allows us to read the first
// byte separately
if (header != CodecUtil.CODEC_MAGIC) {
throw new TranslogCorruptedException("translog looks like version 1 or later, but has corrupted header. path:" + path);
}
// Confirm the rest of the header using CodecUtil, extracting
// the translog version
int version = CodecUtil.checkHeaderNoMagic(new InputStreamDataInput(headerStream), TranslogWriter.TRANSLOG_CODEC, 1, Integer.MAX_VALUE);
switch (version) {
case TranslogWriter.VERSION_CHECKSUMS:
throw new IllegalStateException("pre-2.0 translog found [" + path + "]");
case TranslogWriter.VERSION_CHECKPOINTS:
assert path.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX) : "new file ends with old suffix: " + path;
assert checkpoint.numOps >= 0 : "expected at least 0 operation but got: " + checkpoint.numOps;
assert checkpoint.offset <= channel.size() : "checkpoint is inconsistent with channel length: " + channel.size() + " " + checkpoint;
int len = headerStream.readInt();
if (len > channel.size()) {
throw new TranslogCorruptedException("uuid length can't be larger than the translog");
}
BytesRef ref = new BytesRef(len);
ref.length = len;
headerStream.read(ref.bytes, ref.offset, ref.length);
BytesRef uuidBytes = new BytesRef(translogUUID);
if (uuidBytes.bytesEquals(ref) == false) {
throw new TranslogCorruptedException("expected shard UUID " + uuidBytes + " but got: " + ref +
" this translog file belongs to a different translog. path:" + path);
}
final long firstOperationOffset;
firstOperationOffset = ref.length + CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC) + Integer.BYTES;
return new TranslogReader(checkpoint, channel, path, firstOperationOffset);
default:
throw new TranslogCorruptedException("No known translog stream version: " + version + " path:" + path);
}
} else if (b1 == UNVERSIONED_TRANSLOG_HEADER_BYTE) {
throw new IllegalStateException("pre-1.4 translog found [" + path + "]");
} else {
throw new TranslogCorruptedException("Invalid first byte in translog file, got: " + Long.toHexString(b1) + ", expected 0x00 or 0x3f. path:" + path);
}
} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) {
throw new TranslogCorruptedException("Translog header corrupted. path:" + path, e);
}
final TranslogHeader header = TranslogHeader.read(translogUUID, path, channel);
return new TranslogReader(checkpoint, channel, path, header);
}
public long sizeInBytes() {
@ -168,8 +90,8 @@ public class TranslogReader extends BaseTranslogReader implements Closeable {
if (position >= length) {
throw new EOFException("read requested past EOF. pos [" + position + "] end: [" + length + "]");
}
if (position < firstOperationOffset) {
throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" + firstOperationOffset + "]");
if (position < getFirstOperationOffset()) {
throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" + getFirstOperationOffset() + "]");
}
Channels.readFromFileChannelWithEofException(channel, position, buffer);
}

View File

@ -39,14 +39,14 @@ final class TranslogSnapshot extends BaseTranslogReader {
* Create a snapshot of translog file channel.
*/
TranslogSnapshot(final BaseTranslogReader reader, final long length) {
super(reader.generation, reader.channel, reader.path, reader.firstOperationOffset);
super(reader.generation, reader.channel, reader.path, reader.header);
this.length = length;
this.totalOperations = reader.totalOperations();
this.checkpoint = reader.getCheckpoint();
this.reusableBuffer = ByteBuffer.allocate(1024);
readOperations = 0;
position = firstOperationOffset;
reuse = null;
this.readOperations = 0;
this.position = reader.getFirstOperationOffset();
this.reuse = null;
}
@Override

View File

@ -19,10 +19,8 @@
package org.elasticsearch.index.translog;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.OutputStreamDataOutput;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.Assertions;
import org.elasticsearch.common.bytes.BytesArray;
@ -47,12 +45,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.LongSupplier;
public class TranslogWriter extends BaseTranslogReader implements Closeable {
public static final String TRANSLOG_CODEC = "translog";
public static final int VERSION_CHECKSUMS = 1;
public static final int VERSION_CHECKPOINTS = 2; // since 2.0 we have checkpoints?
public static final int VERSION = VERSION_CHECKPOINTS;
private final ShardId shardId;
private final ChannelFactory channelFactory;
// the last checkpoint that was written when the translog was last synced
@ -85,10 +77,10 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
final FileChannel channel,
final Path path,
final ByteSizeValue bufferSize,
final LongSupplier globalCheckpointSupplier, LongSupplier minTranslogGenerationSupplier) throws IOException {
super(initialCheckpoint.generation, channel, path, channel.position());
final LongSupplier globalCheckpointSupplier, LongSupplier minTranslogGenerationSupplier, TranslogHeader header) throws IOException {
super(initialCheckpoint.generation, channel, path, header);
assert initialCheckpoint.offset == channel.position() :
"initial checkpoint offset [" + initialCheckpoint.offset + "] is different than current channel poistion ["
"initial checkpoint offset [" + initialCheckpoint.offset + "] is different than current channel position ["
+ channel.position() + "]";
this.shardId = shardId;
this.channelFactory = channelFactory;
@ -104,34 +96,16 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
this.seenSequenceNumbers = Assertions.ENABLED ? new HashMap<>() : null;
}
static int getHeaderLength(String translogUUID) {
return getHeaderLength(new BytesRef(translogUUID).length);
}
static int getHeaderLength(int uuidLength) {
return CodecUtil.headerLength(TRANSLOG_CODEC) + uuidLength + Integer.BYTES;
}
static void writeHeader(OutputStreamDataOutput out, BytesRef ref) throws IOException {
CodecUtil.writeHeader(out, TRANSLOG_CODEC, VERSION);
out.writeInt(ref.length);
out.writeBytes(ref.bytes, ref.offset, ref.length);
}
public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, ChannelFactory channelFactory,
ByteSizeValue bufferSize, final long initialMinTranslogGen, long initialGlobalCheckpoint,
final LongSupplier globalCheckpointSupplier, final LongSupplier minTranslogGenerationSupplier)
final LongSupplier globalCheckpointSupplier, final LongSupplier minTranslogGenerationSupplier,
final long primaryTerm)
throws IOException {
final BytesRef ref = new BytesRef(translogUUID);
final int firstOperationOffset = getHeaderLength(ref.length);
final FileChannel channel = channelFactory.open(file);
try {
// This OutputStreamDataOutput is intentionally not closed because
// closing it will close the FileChannel
final OutputStreamDataOutput out = new OutputStreamDataOutput(java.nio.channels.Channels.newOutputStream(channel));
writeHeader(out, ref);
channel.force(true);
final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(firstOperationOffset, fileGeneration,
final TranslogHeader header = new TranslogHeader(translogUUID, primaryTerm);
header.write(channel);
final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(header.sizeInBytes(), fileGeneration,
initialGlobalCheckpoint, initialMinTranslogGen);
writeCheckpoint(channelFactory, file.getParent(), checkpoint);
final LongSupplier writerGlobalCheckpointSupplier;
@ -146,7 +120,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
writerGlobalCheckpointSupplier = globalCheckpointSupplier;
}
return new TranslogWriter(channelFactory, shardId, checkpoint, channel, file, bufferSize,
writerGlobalCheckpointSupplier, minTranslogGenerationSupplier);
writerGlobalCheckpointSupplier, minTranslogGenerationSupplier, header);
} catch (Exception exception) {
// if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that
// file exists we remove it. We only apply this logic to the checkpoint.generation+1 any other file with a higher generation is an error condition
@ -295,7 +269,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
throw ex;
}
if (closed.compareAndSet(false, true)) {
return new TranslogReader(getLastSyncedCheckpoint(), channel, path, getFirstOperationOffset());
return new TranslogReader(getLastSyncedCheckpoint(), channel, path, header);
} else {
throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed (path [" + path + "]", tragedy);
}

View File

@ -33,7 +33,6 @@ import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.store.NativeFSLockFactory;
import org.apache.lucene.store.OutputStreamDataOutput;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cli.EnvironmentAwareCommand;
@ -213,13 +212,11 @@ public class TruncateTranslogCommand extends EnvironmentAwareCommand {
* Write a translog containing the given translog UUID to the given location. Returns the number of bytes written.
*/
public static int writeEmptyTranslog(Path filename, String translogUUID) throws IOException {
final BytesRef translogRef = new BytesRef(translogUUID);
try (FileChannel fc = FileChannel.open(filename, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
OutputStreamDataOutput out = new OutputStreamDataOutput(Channels.newOutputStream(fc))) {
TranslogWriter.writeHeader(out, translogRef);
fc.force(true);
try (FileChannel fc = FileChannel.open(filename, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW)) {
TranslogHeader header = new TranslogHeader(translogUUID, TranslogHeader.UNKNOWN_PRIMARY_TERM);
header.write(fc);
return header.sizeInBytes();
}
return TranslogWriter.getHeaderLength(translogRef.length);
}
/** Show a warning about deleting files, asking for a confirmation if {@code batchMode} is false */

View File

@ -441,8 +441,8 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
store.ensureIndexHasHistoryUUID();
}
// TODO: Assign the global checkpoint to the max_seqno of the safe commit if the index version >= 6.2
final String translogUUID =
Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId);
final String translogUUID = Translog.createEmptyTranslog(
indexShard.shardPath().resolveTranslog(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, indexShard.getPrimaryTerm());
store.associateIndexWithNewTranslog(translogUUID);
} catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) {
// this is a fatal exception at this stage.

View File

@ -93,6 +93,7 @@ import org.elasticsearch.gateway.GatewayModule;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.gateway.MetaStateService;
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.index.analysis.AnalysisRegistry;
import org.elasticsearch.indices.IndicesModule;
import org.elasticsearch.indices.IndicesService;
@ -542,7 +543,7 @@ public class Node implements Closeable {
resourcesToClose.addAll(pluginLifecycleComponents);
this.pluginLifecycleComponents = Collections.unmodifiableList(pluginLifecycleComponents);
client.initialize(injector.getInstance(new Key<Map<GenericAction, TransportAction>>() {}),
() -> clusterService.localNode().getId());
() -> clusterService.localNode().getId(), transportService.getRemoteClusterService());
if (NetworkModule.HTTP_ENABLED.get(settings)) {
logger.debug("initializing HTTP handlers ...");

View File

@ -28,6 +28,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.rest.BaseRestHandler;
import org.elasticsearch.rest.RestController;
import org.elasticsearch.rest.RestRequest;
@ -76,7 +77,7 @@ public class RestBulkAction extends BaseRestHandler {
public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
BulkRequest bulkRequest = Requests.bulkRequest();
String defaultIndex = request.param("index");
String defaultType = request.param("type");
String defaultType = request.param("type", MapperService.SINGLE_MAPPING_NAME);
String defaultRouting = request.param("routing");
FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request);
String fieldsParam = request.param("fields");
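With the default above, a typeless request such as POST /my-index/_bulk now resolves items that omit a type to MapperService.SINGLE_MAPPING_NAME rather than leaving the default null; items that specify a type keep their explicit value.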

View File

@ -23,6 +23,8 @@ import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.client.node.NodeClient;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.query.QueryBuilder;
@ -55,6 +57,7 @@ public class RestSearchAction extends BaseRestHandler {
public static final String TYPED_KEYS_PARAM = "typed_keys";
private static final Set<String> RESPONSE_PARAMS = Collections.singleton(TYPED_KEYS_PARAM);
private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(RestSearchAction.class));
public RestSearchAction(Settings settings, RestController controller) {
super(settings);
@ -147,7 +150,11 @@ public class RestSearchAction extends BaseRestHandler {
searchRequest.scroll(new Scroll(parseTimeValue(scroll, null, "scroll")));
}
searchRequest.types(Strings.splitStringByCommaToArray(request.param("type")));
String types = request.param("type");
if (types != null) {
DEPRECATION_LOGGER.deprecated("The {index}/{type}/_search endpoint is deprecated, use {index}/_search instead");
}
searchRequest.types(Strings.splitStringByCommaToArray(types));
searchRequest.routing(request.param("routing"));
searchRequest.preference(request.param("preference"));
searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions()));
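Concretely, a search addressed to a type, e.g. GET /logs/doc/_search, now emits the deprecation warning above, while the typeless GET /logs/_search does not.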

View File

@ -260,6 +260,7 @@ import java.util.function.Function;
import static java.util.Collections.unmodifiableMap;
import static java.util.Objects.requireNonNull;
import static org.elasticsearch.index.query.SpanNearQueryBuilder.SpanGapQueryBuilder;
/**
* Sets up things that can be done at search time like queries, aggregations, and suggesters.
@ -741,6 +742,7 @@ public class SearchModule {
FieldMaskingSpanQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(SpanFirstQueryBuilder.NAME, SpanFirstQueryBuilder::new, SpanFirstQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(SpanNearQueryBuilder.NAME, SpanNearQueryBuilder::new, SpanNearQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(SpanGapQueryBuilder.NAME, SpanGapQueryBuilder::new, SpanGapQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(SpanOrQueryBuilder.NAME, SpanOrQueryBuilder::new, SpanOrQueryBuilder::fromXContent));
registerQuery(new QuerySpec<>(MoreLikeThisQueryBuilder.NAME, MoreLikeThisQueryBuilder::new,
MoreLikeThisQueryBuilder::fromXContent));

View File

@ -0,0 +1,67 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.transport;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionListenerResponseHandler;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.support.AbstractClient;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
final class RemoteClusterAwareClient extends AbstractClient {
private final TransportService service;
private final String clusterAlias;
private final RemoteClusterService remoteClusterService;
RemoteClusterAwareClient(Settings settings, ThreadPool threadPool, TransportService service, String clusterAlias) {
super(settings, threadPool);
this.service = service;
this.clusterAlias = clusterAlias;
this.remoteClusterService = service.getRemoteClusterService();
}
@Override
protected <Request extends ActionRequest, Response extends ActionResponse, RequestBuilder extends
ActionRequestBuilder<Request, Response, RequestBuilder>>
void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
remoteClusterService.ensureConnected(clusterAlias, ActionListener.wrap(res -> {
Transport.Connection connection = remoteClusterService.getConnection(clusterAlias);
service.sendRequest(connection, action.name(), request, TransportRequestOptions.EMPTY,
new ActionListenerResponseHandler<>(listener, action::newResponse));
},
listener::onFailure));
}
@Override
public void close() {
// do nothing
}
@Override
public Client getRemoteClusterClient(String clusterAlias) {
return remoteClusterService.getRemoteClusterClient(threadPool(), clusterAlias);
}
}

View File

@ -18,6 +18,7 @@
*/
package org.elasticsearch.transport;
import org.elasticsearch.client.Client;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
@ -36,6 +37,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.CountDown;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.Closeable;
import java.io.IOException;
@ -398,4 +400,18 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
});
}
}
/**
* Returns a client to the remote cluster if the given cluster alias exists.
* @param threadPool the {@link ThreadPool} for the client
* @param clusterAlias the cluster alias the remote cluster is registered under
*
* @throws IllegalArgumentException if the given clusterAlias doesn't exist
*/
public Client getRemoteClusterClient(ThreadPool threadPool, String clusterAlias) {
if (transportService.getRemoteClusterService().getRemoteClusterNames().contains(clusterAlias) == false) {
throw new IllegalArgumentException("unknown cluster alias [" + clusterAlias + "]");
}
return new RemoteClusterAwareClient(settings, threadPool, transportService, clusterAlias);
}
}
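A hedged usage sketch of the new remote-cluster client; the alias "my_remote" is illustrative, and threadPool, logger, and remoteClusterService are assumed to be wired up as in a running node:
Client remote = remoteClusterService.getRemoteClusterClient(threadPool, "my_remote");
remote.prepareSearch("logs-*")
    .setSize(10)
    .execute(ActionListener.wrap(
        response -> logger.info("remote hits [{}]", response.getHits().getTotalHits()),
        e -> logger.warn("remote search failed", e)));
// an unknown alias fails fast with IllegalArgumentException instead of failing at send time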

View File

@ -27,6 +27,9 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
import org.elasticsearch.action.admin.indices.segments.ShardSegments;
import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
import org.elasticsearch.action.admin.indices.stats.CommonStats;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
@ -64,6 +67,7 @@ import org.elasticsearch.test.VersionUtils;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.stream.IntStream;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@ -467,4 +471,77 @@ public class ShrinkIndexIT extends ESIntegTestCase {
flushAndRefresh();
assertSortedSegments("target", expectedIndexSort);
}
public void testShrinkCommitsMergeOnIdle() throws Exception {
prepareCreate("source").setSettings(Settings.builder().put(indexSettings())
.put("index.number_of_replicas", 0)
.put("number_of_shards", 5)).get();
for (int i = 0; i < 30; i++) {
client().prepareIndex("source", "type")
.setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
}
client().admin().indices().prepareFlush("source").get();
ImmutableOpenMap<String, DiscoveryNode> dataNodes =
client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
// ensure all shards are allocated, otherwise the ensureGreen() below might not succeed since we require the merge node.
// If we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
// to the require._name below.
ensureGreen();
// relocate all shards to one node such that we can merge it.
client().admin().indices().prepareUpdateSettings("source")
.setSettings(Settings.builder()
.put("index.routing.allocation.require._name", discoveryNodes[0].getName())
.put("index.blocks.write", true)).get();
ensureGreen();
IndicesSegmentResponse sourceStats = client().admin().indices().prepareSegments("source").get();
// disable rebalancing to be able to capture the right stats. Balancing can move the target primary,
// making it hard to pinpoint the source shards.
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none"
)).get();
// now merge source into a single shard index
assertAcked(client().admin().indices().prepareResizeIndex("source", "target")
.setSettings(Settings.builder().put("index.number_of_replicas", 0).build()).get());
ensureGreen();
ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
IndexMetaData target = clusterStateResponse.getState().getMetaData().index("target");
client().admin().indices().prepareForceMerge("target").setMaxNumSegments(1).setFlush(false).get();
IndicesSegmentResponse targetSegStats = client().admin().indices().prepareSegments("target").get();
ShardSegments segmentsStats = targetSegStats.getIndices().get("target").getShards().get(0).getShards()[0];
assertTrue(segmentsStats.getNumberOfCommitted() > 0);
assertNotEquals(segmentsStats.getSegments().size(), segmentsStats.getNumberOfCommitted());
Iterable<IndicesService> dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class);
for (IndicesService service : dataNodeInstances) {
if (service.hasIndex(target.getIndex())) {
IndexService indexShards = service.indexService(target.getIndex());
IndexShard shard = indexShards.getShard(0);
assertTrue(shard.isActive());
shard.checkIdle(0);
assertFalse(shard.isActive());
}
}
assertBusy(() -> {
IndicesSegmentResponse targetStats = client().admin().indices().prepareSegments("target").get();
ShardSegments targetShardSegments = targetStats.getIndices().get("target").getShards().get(0).getShards()[0];
Map<Integer, IndexShardSegments> source = sourceStats.getIndices().get("source").getShards();
int numSourceSegments = 0;
for (IndexShardSegments s : source.values()) {
numSourceSegments += s.getAt(0).getNumberOfCommitted();
}
assertTrue(targetShardSegments.getSegments().size() < numSourceSegments);
assertEquals(targetShardSegments.getNumberOfCommitted(), targetShardSegments.getNumberOfSearch());
assertEquals(targetShardSegments.getNumberOfCommitted(), targetShardSegments.getSegments().size());
assertEquals(1, targetShardSegments.getSegments().size());
});
// clean up
client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(
EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String)null
)).get();
}
}

View File

@ -37,7 +37,8 @@ public class ResyncReplicationRequestTests extends ESTestCase {
public void testSerialization() throws IOException {
final byte[] bytes = "{}".getBytes(Charset.forName("UTF-8"));
final Translog.Index index = new Translog.Index("type", "id", 0, Versions.MATCH_ANY, VersionType.INTERNAL, bytes, null, -1);
final Translog.Index index = new Translog.Index("type", "id", 0, randomNonNegativeLong(),
Versions.MATCH_ANY, VersionType.INTERNAL, bytes, null, -1);
final ShardId shardId = new ShardId(new Index("index", "uuid"), 0);
final ResyncReplicationRequest before = new ResyncReplicationRequest(shardId, new Translog.Operation[]{index});

View File

@ -43,7 +43,7 @@ public class NodeClientHeadersTests extends AbstractClientHeadersTestCase {
Settings settings = HEADER_SETTINGS;
Actions actions = new Actions(settings, threadPool, testedActions);
NodeClient client = new NodeClient(settings, threadPool);
client.initialize(actions, () -> "test");
client.initialize(actions, () -> "test", null);
return client;
}

View File

@ -26,6 +26,7 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.geo.GeoPoint;
import org.elasticsearch.common.lucene.BytesRefs;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.test.ESTestCase;
import org.joda.time.DateTimeZone;
@ -41,6 +42,7 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.TreeMap;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
@ -812,4 +814,30 @@ public class BytesStreamsTests extends ESTestCase {
}
assertEquals(0, input.available());
}
private void assertEqualityAfterSerialize(TimeValue value, int expectedSize) throws IOException {
BytesStreamOutput out = new BytesStreamOutput();
out.writeTimeValue(value);
assertEquals(expectedSize, out.size());
StreamInput in = out.bytes().streamInput();
TimeValue inValue = in.readTimeValue();
assertThat(inValue, equalTo(value));
assertThat(inValue.duration(), equalTo(value.duration()));
assertThat(inValue.timeUnit(), equalTo(value.timeUnit()));
}
public void testTimeValueSerialize() throws Exception {
assertEqualityAfterSerialize(new TimeValue(100, TimeUnit.DAYS), 3);
assertEqualityAfterSerialize(TimeValue.timeValueNanos(-1), 2);
assertEqualityAfterSerialize(TimeValue.timeValueNanos(1), 2);
assertEqualityAfterSerialize(TimeValue.timeValueSeconds(30), 2);
final TimeValue timeValue = new TimeValue(randomIntBetween(0, 1024), randomFrom(TimeUnit.values()));
BytesStreamOutput out = new BytesStreamOutput();
out.writeZLong(timeValue.duration());
assertEqualityAfterSerialize(timeValue, 1 + out.bytes().length());
}
}
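For context on the sizes asserted above: the final assertion implies the wire format is the duration as a zig-zag vlong followed by a single byte for the time unit. The arithmetic, as a sketch (the zig-zag form is an assumption based on writeZLong):
static long zigZag(long v) { // standard zig-zag encoding, assumed to match writeZLong
    return (v << 1) ^ (v >> 63);
}
// zigZag(-1)  == 1   -> 1 vlong byte + 1 unit byte = 2  (timeValueNanos(-1))
// zigZag(30)  == 60  -> 1 vlong byte + 1 unit byte = 2  (timeValueSeconds(30))
// zigZag(100) == 200 -> 2 vlong bytes + 1 unit byte = 3  (new TimeValue(100, TimeUnit.DAYS))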

View File

@ -33,6 +33,7 @@ import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.stream.IntStream;
import java.nio.charset.StandardCharsets;
import static org.elasticsearch.common.logging.DeprecationLogger.WARNING_HEADER_PATTERN;
import static org.elasticsearch.test.hamcrest.RegexMatcher.matches;
@ -246,6 +247,60 @@ public class DeprecationLoggerTests extends ESTestCase {
assertThat(DeprecationLogger.encode(s), IsSame.sameInstance(s));
}
public void testWarningHeaderCountSetting() throws IOException {
// Test that the number of warning headers doesn't exceed 'http.max_warning_header_count'
final int maxWarningHeaderCount = 2;
Settings settings = Settings.builder()
.put("http.max_warning_header_count", maxWarningHeaderCount)
.build();
try (ThreadContext threadContext = new ThreadContext(settings)) {
final Set<ThreadContext> threadContexts = Collections.singleton(threadContext);
// try to log three warning messages
logger.deprecated(threadContexts, "A simple message 1");
logger.deprecated(threadContexts, "A simple message 2");
logger.deprecated(threadContexts, "A simple message 3");
final Map<String, List<String>> responseHeaders = threadContext.getResponseHeaders();
final List<String> responses = responseHeaders.get("Warning");
assertEquals(maxWarningHeaderCount, responses.size());
assertThat(responses.get(0), warningValueMatcher);
assertThat(responses.get(0), containsString("\"A simple message 1"));
assertThat(responses.get(1), warningValueMatcher);
assertThat(responses.get(1), containsString("\"A simple message 2"));
}
}
public void testWarningHeaderSizeSetting() throws IOException {
// Test that the size of warning headers doesn't exceed 'http.max_warning_header_size'
Settings settings = Settings.builder()
.put("http.max_warning_header_size", "1Kb")
.build();
byte[] arr = new byte[300];
String message1 = new String(arr, StandardCharsets.UTF_8) + "1";
String message2 = new String(arr, StandardCharsets.UTF_8) + "2";
String message3 = new String(arr, StandardCharsets.UTF_8) + "3";
try (ThreadContext threadContext = new ThreadContext(settings)) {
final Set<ThreadContext> threadContexts = Collections.singleton(threadContext);
// try to log three warning messages
logger.deprecated(threadContexts, message1);
logger.deprecated(threadContexts, message2);
logger.deprecated(threadContexts, message3);
final Map<String, List<String>> responseHeaders = threadContext.getResponseHeaders();
final List<String> responses = responseHeaders.get("Warning");
long warningHeadersSize = 0L;
for (String response : responses) {
warningHeadersSize += "Warning".getBytes(StandardCharsets.UTF_8).length +
response.getBytes(StandardCharsets.UTF_8).length;
}
// assert that the size of all warning headers is less or equal to 1Kb
assertTrue(warningHeadersSize <= 1024);
}
}
private String range(int lowerInclusive, int upperInclusive) {
return IntStream
.range(lowerInclusive, upperInclusive + 1)

View File

@ -244,7 +244,7 @@ public class IndexModuleTests extends ESTestCase {
assertSame(listener, indexService.getIndexOperationListeners().get(1));
ParsedDocument doc = InternalEngineTests.createParsedDoc("1", null);
Engine.Index index = new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), doc);
Engine.Index index = new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), randomNonNegativeLong(), doc);
ShardId shardId = new ShardId(new Index("foo", "bar"), 0);
for (IndexingOperationListener l : indexService.getIndexOperationListeners()) {
l.preIndex(shardId, index);

View File

@ -236,7 +236,7 @@ public class InternalEngineTests extends EngineTestCase {
assertEquals(2, searcher.reader().numDocs());
}
assertFalse("safe access should NOT be required last indexing round was only append only", engine.isSafeAccessRequired());
engine.delete(new Engine.Delete(operation.type(), operation.id(), operation.uid()));
engine.delete(new Engine.Delete(operation.type(), operation.id(), operation.uid(), primaryTerm.get()));
assertTrue("safe access should be required", engine.isSafeAccessRequired());
engine.refresh("test");
assertTrue("safe access should be required", engine.isSafeAccessRequired());
@ -318,7 +318,7 @@ public class InternalEngineTests extends EngineTestCase {
assertThat(segments.get(1).isCompound(), equalTo(true));
engine.delete(new Engine.Delete("test", "1", newUid(doc)));
engine.delete(new Engine.Delete("test", "1", newUid(doc), primaryTerm.get()));
engine.refresh("test");
segments = engine.segments(false);
@ -891,7 +891,7 @@ public class InternalEngineTests extends EngineTestCase {
searchResult.close();
// now delete
engine.delete(new Engine.Delete("test", "1", newUid(doc)));
engine.delete(new Engine.Delete("test", "1", newUid(doc), primaryTerm.get()));
// its not deleted yet
searchResult = engine.acquireSearcher("test");
@ -918,7 +918,7 @@ public class InternalEngineTests extends EngineTestCase {
document = testDocumentWithTextField();
document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE));
doc = testParsedDocument("1", null, document, B_1, null);
engine.index(new Engine.Index(newUid(doc), doc, Versions.MATCH_DELETED));
engine.index(new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED));
// its not there...
searchResult = engine.acquireSearcher("test");
@ -995,7 +995,7 @@ public class InternalEngineTests extends EngineTestCase {
// don't release the search result yet...
// delete, refresh and do a new search, it should not be there
engine.delete(new Engine.Delete("test", "1", newUid(doc)));
engine.delete(new Engine.Delete("test", "1", newUid(doc), primaryTerm.get()));
engine.refresh("test");
Engine.Searcher updateSearchResult = engine.acquireSearcher("test");
MatcherAssert.assertThat(updateSearchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
@ -1114,7 +1114,7 @@ public class InternalEngineTests extends EngineTestCase {
engine.index(doc4);
assertEquals(engine.getLastWriteNanos(), doc4.startTime());
} else {
Engine.Delete delete = new Engine.Delete(doc1.type(), doc1.id(), doc1.uid());
Engine.Delete delete = new Engine.Delete(doc1.type(), doc1.id(), doc1.uid(), primaryTerm.get());
engine.delete(delete);
assertEquals(engine.getLastWriteNanos(), delete.startTime());
}
@ -1148,7 +1148,7 @@ public class InternalEngineTests extends EngineTestCase {
}
if (randomBoolean()) {
final String translogUUID = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(),
SequenceNumbers.UNASSIGNED_SEQ_NO, shardId);
SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, primaryTerm.get());
store.associateIndexWithNewTranslog(translogUUID);
}
trimUnsafeCommits(config);
@ -1178,7 +1178,7 @@ public class InternalEngineTests extends EngineTestCase {
public void testVersioningNewCreate() throws IOException {
ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null);
Engine.Index create = new Engine.Index(newUid(doc), doc, Versions.MATCH_DELETED);
Engine.Index create = new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED);
Engine.IndexResult indexResult = engine.index(create);
assertThat(indexResult.getVersion(), equalTo(1L));
@ -1190,7 +1190,7 @@ public class InternalEngineTests extends EngineTestCase {
public void testReplicatedVersioningWithFlush() throws IOException {
ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null);
Engine.Index create = new Engine.Index(newUid(doc), doc, Versions.MATCH_DELETED);
Engine.Index create = new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED);
Engine.IndexResult indexResult = engine.index(create);
assertThat(indexResult.getVersion(), equalTo(1L));
assertTrue(indexResult.isCreated());
@ -1209,7 +1209,7 @@ public class InternalEngineTests extends EngineTestCase {
replicaEngine.flush();
}
Engine.Index update = new Engine.Index(newUid(doc), doc, 1);
Engine.Index update = new Engine.Index(newUid(doc), primaryTerm.get(), doc, 1);
Engine.IndexResult updateResult = engine.index(update);
assertThat(updateResult.getVersion(), equalTo(2L));
assertFalse(updateResult.isCreated());
@ -1238,14 +1238,14 @@ public class InternalEngineTests extends EngineTestCase {
final BiFunction<String, Engine.SearcherScope, Searcher> searcherFactory = engine::acquireSearcher;
ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null);
Engine.Index create = new Engine.Index(newUid(doc), doc, Versions.MATCH_DELETED);
Engine.Index create = new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED);
Engine.IndexResult indexResult = engine.index(create);
assertThat(indexResult.getVersion(), equalTo(1L));
try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) {
assertEquals(1, get.version());
}
Engine.Index update_1 = new Engine.Index(newUid(doc), doc, 1);
Engine.Index update_1 = new Engine.Index(newUid(doc), primaryTerm.get(), doc, 1);
Engine.IndexResult update_1_result = engine.index(update_1);
assertThat(update_1_result.getVersion(), equalTo(2L));
@ -1253,7 +1253,7 @@ public class InternalEngineTests extends EngineTestCase {
assertEquals(2, get.version());
}
Engine.Index update_2 = new Engine.Index(newUid(doc), doc, 2);
Engine.Index update_2 = new Engine.Index(newUid(doc), primaryTerm.get(), doc, 2);
Engine.IndexResult update_2_result = engine.index(update_2);
assertThat(update_2_result.getVersion(), equalTo(3L));
@ -1294,7 +1294,7 @@ public class InternalEngineTests extends EngineTestCase {
ParsedDocument doc = testParsedDocument(Integer.toString(0), null, testDocument(), B_1, null);
Engine.Index index = indexForDoc(doc);
engine.delete(new Engine.Delete(index.type(), index.id(), index.uid()));
engine.delete(new Engine.Delete(index.type(), index.id(), index.uid(), primaryTerm.get()));
engine.forceMerge(true, 10, true, false, false); // expunge deletes
engine.refresh("test");
@ -1306,7 +1306,7 @@ public class InternalEngineTests extends EngineTestCase {
doc = testParsedDocument(Integer.toString(1), null, testDocument(), B_1, null);
index = indexForDoc(doc);
engine.delete(new Engine.Delete(index.type(), index.id(), index.uid()));
engine.delete(new Engine.Delete(index.type(), index.id(), index.uid(), primaryTerm.get()));
engine.forceMerge(true, 10, false, false, false); // expunge deletes
engine.refresh("test");
assertEquals(engine.segments(true).size(), 1);
@ -1772,7 +1772,7 @@ public class InternalEngineTests extends EngineTestCase {
indexResult = engine.index(index);
assertFalse(indexResult.isCreated());
engine.delete(new Engine.Delete("doc", "1", newUid(doc)));
engine.delete(new Engine.Delete("doc", "1", newUid(doc), primaryTerm.get()));
index = indexForDoc(doc);
indexResult = engine.index(index);
@ -2248,7 +2248,7 @@ public class InternalEngineTests extends EngineTestCase {
{
store.createEmpty();
final String translogUUID =
Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
store.associateIndexWithNewTranslog(translogUUID);
ParsedDocument doc = testParsedDocument(Integer.toString(0), null, testDocument(), new BytesArray("{}"), null);
Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0,
@ -2289,7 +2289,7 @@ public class InternalEngineTests extends EngineTestCase {
// open index with new tlog
{
final String translogUUID =
Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
store.associateIndexWithNewTranslog(translogUUID);
trimUnsafeCommits(config);
try (InternalEngine engine = new InternalEngine(config)) {
@ -2324,7 +2324,8 @@ public class InternalEngineTests extends EngineTestCase {
// test that we can force-start the engine, even if the translog is missing.
engine.close();
// fake a new translog, causing the engine to point to a missing one.
Translog translog = createTranslog();
final long primaryTerm = randomNonNegativeLong();
Translog translog = createTranslog(() -> primaryTerm);
long id = translog.currentFileGeneration();
translog.close();
IOUtils.rm(translog.location().resolve(Translog.getFilename(id)));
@ -2335,7 +2336,7 @@ public class InternalEngineTests extends EngineTestCase {
// expected
}
// when a new translog is created it should be ok
final String translogUUID = Translog.createEmptyTranslog(primaryTranslogDir, SequenceNumbers.UNASSIGNED_SEQ_NO, shardId);
final String translogUUID = Translog.createEmptyTranslog(primaryTranslogDir, SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, primaryTerm);
store.associateIndexWithNewTranslog(translogUUID);
EngineConfig config = config(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null);
engine = new InternalEngine(config);
@ -2401,7 +2402,7 @@ public class InternalEngineTests extends EngineTestCase {
final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
final LongSupplier globalCheckpointSupplier = () -> globalCheckpoint.get();
store.createEmpty();
final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId);
final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId, primaryTerm.get());
store.associateIndexWithNewTranslog(translogUUID);
try (InternalEngine engine =
new InternalEngine(config(indexSettings, store, translogPath, newMergePolicy(), null, null,
@ -2539,7 +2540,7 @@ public class InternalEngineTests extends EngineTestCase {
}
parser = (TranslogHandler) engine.config().getTranslogRecoveryRunner();
assertEquals(flush ? 1 : 2, parser.appliedOperations());
engine.delete(new Engine.Delete("test", Integer.toString(randomId), newUid(doc)));
engine.delete(new Engine.Delete("test", Integer.toString(randomId), newUid(doc), primaryTerm.get()));
if (randomBoolean()) {
engine.refresh("test");
} else {
@ -2565,11 +2566,11 @@ public class InternalEngineTests extends EngineTestCase {
engine.close();
final Path badTranslogLog = createTempDir();
final String badUUID = Translog.createEmptyTranslog(badTranslogLog, SequenceNumbers.NO_OPS_PERFORMED, shardId);
final String badUUID = Translog.createEmptyTranslog(badTranslogLog, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
Translog translog = new Translog(
new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE),
badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED);
translog.add(new Translog.Index("test", "SomeBogusId", 0, "{}".getBytes(Charset.forName("UTF-8"))));
badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
translog.add(new Translog.Index("test", "SomeBogusId", 0, primaryTerm.get(), "{}".getBytes(Charset.forName("UTF-8"))));
assertEquals(generation.translogFileGeneration, translog.currentFileGeneration());
translog.close();
@ -2583,7 +2584,7 @@ public class InternalEngineTests extends EngineTestCase {
new CodecService(null, logger), config.getEventListener(), IndexSearcher.getDefaultQueryCache(),
IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5),
config.getExternalRefreshListener(), config.getInternalRefreshListener(), null, config.getTranslogRecoveryRunner(),
new NoneCircuitBreakerService(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
new NoneCircuitBreakerService(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm::get);
try {
InternalEngine internalEngine = new InternalEngine(brokenConfig);
fail("translog belongs to a different engine");
@ -2772,11 +2773,11 @@ public class InternalEngineTests extends EngineTestCase {
final Engine.DeleteResult deleteResult;
if (randomBoolean()) {
throwingIndexWriter.get().setThrowFailure(() -> new IOException("simulated"));
deleteResult = engine.delete(new Engine.Delete("test", "1", newUid(doc1)));
deleteResult = engine.delete(new Engine.Delete("test", "1", newUid(doc1), primaryTerm.get()));
assertThat(deleteResult.getFailure(), instanceOf(IOException.class));
} else {
throwingIndexWriter.get().setThrowFailure(() -> new IllegalArgumentException("simulated max token length"));
deleteResult = engine.delete(new Engine.Delete("test", "1", newUid(doc1)));
deleteResult = engine.delete(new Engine.Delete("test", "1", newUid(doc1), primaryTerm.get()));
assertThat(deleteResult.getFailure(),
instanceOf(IllegalArgumentException.class));
}
@ -2809,7 +2810,7 @@ public class InternalEngineTests extends EngineTestCase {
if (randomBoolean()) {
engine.index(indexForDoc(doc1));
} else {
engine.delete(new Engine.Delete("test", "", newUid(doc1)));
engine.delete(new Engine.Delete("test", "", newUid(doc1), primaryTerm.get()));
}
fail("engine should be closed");
} catch (Exception e) {
@ -3210,7 +3211,7 @@ public class InternalEngineTests extends EngineTestCase {
}
try (Store store = createStore(newFSDirectory(storeDir))) {
if (randomBoolean() || true) {
final String translogUUID = Translog.createEmptyTranslog(translogDir, SequenceNumbers.NO_OPS_PERFORMED, shardId);
final String translogUUID = Translog.createEmptyTranslog(translogDir, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
store.associateIndexWithNewTranslog(translogUUID);
}
try (Engine engine = new InternalEngine(configSupplier.apply(store))) {
@ -3364,7 +3365,7 @@ public class InternalEngineTests extends EngineTestCase {
seqID = getSequenceID(engine, newGet(false, doc));
logger.info("--> got seqID: {}", seqID);
assertThat(seqID.v1(), equalTo(0L));
assertThat(seqID.v2(), equalTo(2L));
assertThat(seqID.v2(), equalTo(primaryTerm.get()));
// Index the same document again
document = testDocumentWithTextField();
@ -3376,7 +3377,7 @@ public class InternalEngineTests extends EngineTestCase {
seqID = getSequenceID(engine, newGet(false, doc));
logger.info("--> got seqID: {}", seqID);
assertThat(seqID.v1(), equalTo(1L));
assertThat(seqID.v2(), equalTo(2L));
assertThat(seqID.v2(), equalTo(primaryTerm.get()));
// Index the same document for the third time, this time changing the primary term
document = testDocumentWithTextField();
@ -3590,13 +3591,12 @@ public class InternalEngineTests extends EngineTestCase {
}
};
noOpEngine.recoverFromTranslog();
final long primaryTerm = randomNonNegativeLong();
final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm);
final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm.get());
final String reason = randomAlphaOfLength(16);
noOpEngine.noOp(
new Engine.NoOp(
maxSeqNo + 1,
primaryTerm,
primaryTerm.get(),
randomFrom(PRIMARY, REPLICA, PEER_RECOVERY, LOCAL_TRANSLOG_RECOVERY),
System.nanoTime(),
reason));
@ -3614,7 +3614,7 @@ public class InternalEngineTests extends EngineTestCase {
assertThat(last, instanceOf(Translog.NoOp.class));
final Translog.NoOp noOp = (Translog.NoOp) last;
assertThat(noOp.seqNo(), equalTo((long) (maxSeqNo + 1)));
assertThat(noOp.primaryTerm(), equalTo(primaryTerm));
assertThat(noOp.primaryTerm(), equalTo(primaryTerm.get()));
assertThat(noOp.reason(), equalTo(reason));
} finally {
IOUtils.close(noOpEngine);
@ -3817,7 +3817,7 @@ public class InternalEngineTests extends EngineTestCase {
if (operation.opType() == Translog.Operation.Type.NO_OP) {
assertEquals(2, operation.primaryTerm());
} else {
assertEquals(1, operation.primaryTerm());
assertEquals(primaryTerm.get(), operation.primaryTerm());
}
}
@ -4017,7 +4017,7 @@ public class InternalEngineTests extends EngineTestCase {
store = createStore();
final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
store.createEmpty();
final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId);
final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId, primaryTerm.get());
store.associateIndexWithNewTranslog(translogUUID);
final EngineConfig engineConfig = config(indexSettings, store, translogPath, NoMergePolicy.INSTANCE, null, null,
@ -4087,7 +4087,7 @@ public class InternalEngineTests extends EngineTestCase {
Engine.Index operation = appendOnlyPrimary(doc, false, 1);
engine.index(operation);
if (rarely()) {
engine.delete(new Engine.Delete(operation.type(), operation.id(), operation.uid()));
engine.delete(new Engine.Delete(operation.type(), operation.id(), operation.uid(), primaryTerm.get()));
numDeletes.incrementAndGet();
} else {
doc = testParsedDocument(docID, null, testDocumentWithTextField("updated"),
@ -4226,7 +4226,7 @@ public class InternalEngineTests extends EngineTestCase {
engine.index(indexForDoc(doc));
}
assertThat("Not exceeded translog flush threshold yet", engine.shouldPeriodicallyFlush(), equalTo(false));
long flushThreshold = RandomNumbers.randomLongBetween(random(), 100,
long flushThreshold = RandomNumbers.randomLongBetween(random(), 120,
engine.getTranslog().stats().getUncommittedSizeInBytes()- extraTranslogSizeInNewEngine);
final IndexSettings indexSettings = engine.config().getIndexSettings();
final IndexMetaData indexMetaData = IndexMetaData.builder(indexSettings.getIndexMetaData())
@ -4268,7 +4268,7 @@ public class InternalEngineTests extends EngineTestCase {
}
public void testStressShouldPeriodicallyFlush() throws Exception {
final long flushThreshold = randomLongBetween(100, 5000);
final long flushThreshold = randomLongBetween(120, 5000);
final long generationThreshold = randomLongBetween(1000, 5000);
final IndexSettings indexSettings = engine.config().getIndexSettings();
final IndexMetaData indexMetaData = IndexMetaData.builder(indexSettings.getIndexMetaData())
@ -4309,7 +4309,7 @@ public class InternalEngineTests extends EngineTestCase {
Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), 0, false);
// first index an append-only document and then delete it, such that we have it in the tombstones
engine.index(doc);
engine.delete(new Engine.Delete(doc.type(), doc.id(), doc.uid()));
engine.delete(new Engine.Delete(doc.type(), doc.id(), doc.uid(), primaryTerm.get()));
// now index more append-only docs and refresh so we re-enable the optimization for the unsafe version map
ParsedDocument document1 = testParsedDocument(Integer.toString(1), null, testDocumentWithTextField(), SOURCE, null);
@ -4445,7 +4445,7 @@ public class InternalEngineTests extends EngineTestCase {
if (randomBoolean()) {
engine.index(indexForDoc(parsedDocument));
} else {
engine.delete(new Engine.Delete(parsedDocument.type(), parsedDocument.id(), newUid(parsedDocument.id())));
engine.delete(new Engine.Delete(parsedDocument.type(), parsedDocument.id(), newUid(parsedDocument.id()), primaryTerm.get()));
}
}
}

View File

@ -0,0 +1,127 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.query;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.spans.SpanBoostQuery;
import org.apache.lucene.search.spans.SpanNearQuery;
import org.apache.lucene.search.spans.SpanQuery;
import org.apache.lucene.search.spans.SpanTermQuery;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.test.AbstractQueryTestCase;
import java.io.IOException;
import java.util.Iterator;
import static org.elasticsearch.index.query.SpanNearQueryBuilder.SpanGapQueryBuilder;
import static org.hamcrest.CoreMatchers.containsString;
import static org.hamcrest.CoreMatchers.either;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.instanceOf;
/*
 * SpanGapQueryBuilder, unlike other query builders, is never used to build a Query on its own. It therefore
 * does not fit the test pattern of AbstractQueryTestCase. Since it is only ever used inside SpanNearQueryBuilder,
 * its test cases are the same as those of the latter, with SpanGapQueryBuilder clauses mixed in.
 */
public class SpanGapQueryBuilderTests extends AbstractQueryTestCase<SpanNearQueryBuilder> {
@Override
protected SpanNearQueryBuilder doCreateTestQueryBuilder() {
SpanTermQueryBuilder[] spanTermQueries = new SpanTermQueryBuilderTests().createSpanTermQueryBuilders(randomIntBetween(1, 6));
SpanNearQueryBuilder queryBuilder = new SpanNearQueryBuilder(spanTermQueries[0], randomIntBetween(-10, 10));
for (int i = 1; i < spanTermQueries.length; i++) {
SpanTermQueryBuilder termQB = spanTermQueries[i];
queryBuilder.addClause(termQB);
if (i % 2 == 1) {
SpanGapQueryBuilder gapQB = new SpanGapQueryBuilder(termQB.fieldName(), randomIntBetween(1,2));
queryBuilder.addClause(gapQB);
}
}
queryBuilder.inOrder(true);
return queryBuilder;
}
@Override
protected void doAssertLuceneQuery(SpanNearQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException {
assertThat(query, either(instanceOf(SpanNearQuery.class))
.or(instanceOf(SpanTermQuery.class))
.or(instanceOf(SpanBoostQuery.class))
.or(instanceOf(MatchAllQueryBuilder.class)));
if (query instanceof SpanNearQuery) {
SpanNearQuery spanNearQuery = (SpanNearQuery) query;
assertThat(spanNearQuery.getSlop(), equalTo(queryBuilder.slop()));
assertThat(spanNearQuery.isInOrder(), equalTo(queryBuilder.inOrder()));
assertThat(spanNearQuery.getClauses().length, equalTo(queryBuilder.clauses().size()));
Iterator<SpanQueryBuilder> spanQueryBuilderIterator = queryBuilder.clauses().iterator();
for (SpanQuery spanQuery : spanNearQuery.getClauses()) {
SpanQueryBuilder spanQB = spanQueryBuilderIterator.next();
if (spanQB instanceof SpanGapQueryBuilder) continue;
assertThat(spanQuery, equalTo(spanQB.toQuery(context.getQueryShardContext())));
}
} else if (query instanceof SpanTermQuery || query instanceof SpanBoostQuery) {
assertThat(queryBuilder.clauses().size(), equalTo(1));
assertThat(query, equalTo(queryBuilder.clauses().get(0).toQuery(context.getQueryShardContext())));
}
}
public void testIllegalArguments() {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new SpanGapQueryBuilder(null, 1));
assertEquals("[span_gap] field name is null or empty", e.getMessage());
}
public void testFromJson() throws IOException {
String json =
"{\n" +
" \"span_near\" : {\n" +
" \"clauses\" : [ {\n" +
" \"span_term\" : {\n" +
" \"field\" : {\n" +
" \"value\" : \"value1\",\n" +
" \"boost\" : 1.0\n" +
" }\n" +
" }\n" +
" }, {\n" +
" \"span_gap\" : {\n" +
" \"field\" : 2" +
" }\n" +
" }, {\n" +
" \"span_term\" : {\n" +
" \"field\" : {\n" +
" \"value\" : \"value3\",\n" +
" \"boost\" : 1.0\n" +
" }\n" +
" }\n" +
" } ],\n" +
" \"slop\" : 12,\n" +
" \"in_order\" : false,\n" +
" \"boost\" : 1.0\n" +
" }\n" +
"}";
SpanNearQueryBuilder parsed = (SpanNearQueryBuilder) parseQuery(json);
checkGeneratedJson(json, parsed);
assertEquals(json, 3, parsed.clauses().size());
assertEquals(json, 12, parsed.slop());
assertEquals(json, false, parsed.inOrder());
}
}
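
For orientation, the clauses exercised by this test compose as follows; a minimal sketch (the field name "body" and the term values are illustrative assumptions, while the builder calls come from the test above):

import org.elasticsearch.index.query.SpanNearQueryBuilder;
import org.elasticsearch.index.query.SpanNearQueryBuilder.SpanGapQueryBuilder;
import org.elasticsearch.index.query.SpanTermQueryBuilder;

public class SpanGapExample {
    public static SpanNearQueryBuilder quickFoxWithGap() {
        // "quick" followed by a one-position gap, then "fox", in order, within a slop of 2
        SpanNearQueryBuilder near = new SpanNearQueryBuilder(new SpanTermQueryBuilder("body", "quick"), 2);
        near.addClause(new SpanGapQueryBuilder("body", 1)); // the gap consumes one position without matching a term
        near.addClause(new SpanTermQueryBuilder("body", "fox"));
        return near.inOrder(true);
    }
}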

View File

@ -184,4 +184,5 @@ public class SpanNearQueryBuilderTests extends AbstractQueryTestCase<SpanNearQue
() -> parseQuery(json));
assertThat(e.getMessage(), containsString("[span_near] query does not support [collect_payloads]"));
}
}

View File

@ -71,4 +71,16 @@ public class TypeQueryBuilderTests extends AbstractQueryTestCase<TypeQueryBuilde
assertEquals(json, "my_type", parsed.type());
}
@Override
public void testToQuery() throws IOException {
super.testToQuery();
assertWarnings("The [type] query is deprecated, filter on a field instead.");
}
@Override
public void testMustRewrite() throws IOException {
super.testMustRewrite();
assertWarnings("The [type] query is deprecated, filter on a field instead.");
}
}
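
The deprecation warning asserted above points users at filtering on a regular field instead of _type; a minimal sketch of such a replacement (the "doc_type" keyword field is a hypothetical application-level field, not part of this change):

import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

public class TypeFilterExample {
    // Rather than the deprecated type query on "my_type", filter on a field the application maintains itself.
    public static QueryBuilder replacementForTypeQuery() {
        return QueryBuilders.termQuery("doc_type", "my_type"); // "doc_type" is a hypothetical keyword field
    }
}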

View File

@ -265,7 +265,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
RecoverySource.PeerRecoverySource.INSTANCE);
final IndexShard newReplica =
newShard(shardRouting, shardPath, indexMetaData, null, getEngineFactory(shardRouting), () -> {});
newShard(shardRouting, shardPath, indexMetaData, null, getEngineFactory(shardRouting), () -> {}, EMPTY_EVENT_LISTENER);
replicas.add(newReplica);
updateAllocationIDsOnPrimary();
return newReplica;

View File

@ -186,7 +186,7 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
false,
SourceToParse.source("index", "type", "replica", new BytesArray("{}"), XContentType.JSON),
mapping -> {});
shards.promoteReplicaToPrimary(promotedReplica);
shards.promoteReplicaToPrimary(promotedReplica).get();
oldPrimary.close("demoted", randomBoolean());
oldPrimary.store().close();
shards.removeReplica(remainingReplica);

View File

@ -335,7 +335,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
assertFalse(shard.shouldPeriodicallyFlush());
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder()
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
new ByteSizeValue(160 /* size of the operation + two generations header&footer*/, ByteSizeUnit.BYTES)).build()).get();
new ByteSizeValue(190 /* size of the operation + two generations header&footer*/, ByteSizeUnit.BYTES)).build()).get();
client().prepareIndex("test", "test", "0")
.setSource("{}", XContentType.JSON).setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
assertFalse(shard.shouldPeriodicallyFlush());

View File

@ -72,11 +72,13 @@ import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.MergePolicyConfig;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.EngineException;
import org.elasticsearch.index.engine.InternalEngineFactory;
import org.elasticsearch.index.engine.InternalEngine;
import org.elasticsearch.index.engine.Segment;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.fielddata.IndexFieldData;
@ -91,6 +93,7 @@ import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.index.translog.TestTranslog;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.TranslogTests;
import org.elasticsearch.indices.IndicesQueryCache;
@ -520,6 +523,7 @@ public class IndexShardTests extends IndexShardTestCase {
// promote the replica
final ShardRouting replicaRouting = indexShard.routingEntry();
final long newPrimaryTerm = indexShard.getPrimaryTerm() + between(1, 10000);
final ShardRouting primaryRouting =
newShardRouting(
replicaRouting.shardId(),
@ -528,7 +532,7 @@ public class IndexShardTests extends IndexShardTestCase {
true,
ShardRoutingState.STARTED,
replicaRouting.allocationId());
indexShard.updateShardState(primaryRouting, indexShard.getPrimaryTerm() + 1, (shard, listener) -> {},
indexShard.updateShardState(primaryRouting, newPrimaryTerm, (shard, listener) -> {},
0L, Collections.singleton(primaryRouting.allocationId().getId()),
new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build(), Collections.emptySet());
@ -554,6 +558,7 @@ public class IndexShardTests extends IndexShardTestCase {
latch.await();
assertThat(indexShard.getTranslog().getGeneration().translogFileGeneration, equalTo(currentTranslogGeneration + 1));
assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm));
closeShards(indexShard);
}
@ -572,7 +577,10 @@ public class IndexShardTests extends IndexShardTestCase {
ShardRouting replicaRouting = indexShard.routingEntry();
ShardRouting primaryRouting = newShardRouting(replicaRouting.shardId(), replicaRouting.currentNodeId(), null,
true, ShardRoutingState.STARTED, replicaRouting.allocationId());
indexShard.updateShardState(primaryRouting, indexShard.getPrimaryTerm() + 1, (shard, listener) -> {}, 0L,
final long newPrimaryTerm = indexShard.getPrimaryTerm() + between(1, 1000);
indexShard.updateShardState(primaryRouting, newPrimaryTerm, (shard, listener) -> {
assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm));
}, 0L,
Collections.singleton(indexShard.routingEntry().allocationId().getId()),
new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build(),
Collections.emptySet());
@ -740,6 +748,7 @@ public class IndexShardTests extends IndexShardTestCase {
@Override
public void onResponse(Releasable releasable) {
assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm));
assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm));
assertThat(indexShard.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint));
assertThat(indexShard.getGlobalCheckpoint(), equalTo(newGlobalCheckPoint));
onResponse.set(true);
@ -785,15 +794,18 @@ public class IndexShardTests extends IndexShardTestCase {
assertFalse(onResponse.get());
assertNull(onFailure.get());
assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm));
assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(primaryTerm));
Releasables.close(operation1);
// our operation should still be blocked
assertFalse(onResponse.get());
assertNull(onFailure.get());
assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm));
assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(primaryTerm));
Releasables.close(operation2);
barrier.await();
// now lock acquisition should have succeeded
assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm));
assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm));
if (engineClosed) {
assertFalse(onResponse.get());
assertThat(onFailure.get(), instanceOf(AlreadyClosedException.class));
@ -1741,7 +1753,7 @@ public class IndexShardTests extends IndexShardTestCase {
flushShard(shard);
assertThat(getShardDocUIDs(shard), containsInAnyOrder("doc-0", "doc-1"));
// Simulate resync (without rollback): Noop #1, index #2
shard.primaryTerm++;
acquireReplicaOperationPermitBlockingly(shard, shard.primaryTerm + 1);
shard.markSeqNoAsNoop(1, "test");
shard.applyIndexOperationOnReplica(2, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
SourceToParse.source(indexName, "doc", "doc-2", new BytesArray("{}"), XContentType.JSON), mapping);
@ -1859,7 +1871,7 @@ public class IndexShardTests extends IndexShardTestCase {
closeShards(shard);
IndexShard newShard = newShard(
ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE),
shard.shardPath(), shard.indexSettings().getIndexMetaData(), wrapper, new InternalEngineFactory(), () -> {});
shard.shardPath(), shard.indexSettings().getIndexMetaData(), wrapper, new InternalEngineFactory(), () -> {}, EMPTY_EVENT_LISTENER);
recoverShardFromStore(newShard);
@ -2005,7 +2017,7 @@ public class IndexShardTests extends IndexShardTestCase {
closeShards(shard);
IndexShard newShard = newShard(
ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE),
shard.shardPath(), shard.indexSettings().getIndexMetaData(), wrapper, new InternalEngineFactory(), () -> {});
shard.shardPath(), shard.indexSettings().getIndexMetaData(), wrapper, new InternalEngineFactory(), () -> {}, EMPTY_EVENT_LISTENER);
recoverShardFromStore(newShard);
@ -2054,18 +2066,18 @@ public class IndexShardTests extends IndexShardTestCase {
IndexMetaData metaData = IndexMetaData.builder("test")
.putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
.settings(settings)
.primaryTerm(0, 1).build();
.primaryTerm(0, randomLongBetween(1, Long.MAX_VALUE)).build();
IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
List<Translog.Operation> operations = new ArrayList<>();
int numTotalEntries = randomIntBetween(0, 10);
int numCorruptEntries = 0;
for (int i = 0; i < numTotalEntries; i++) {
if (randomBoolean()) {
operations.add(new Translog.Index("test", "1", 0, 1, VersionType.INTERNAL,
operations.add(new Translog.Index("test", "1", 0, primary.getPrimaryTerm(), 1, VersionType.INTERNAL,
"{\"foo\" : \"bar\"}".getBytes(Charset.forName("UTF-8")), null, -1));
} else {
// corrupt entry
operations.add(new Translog.Index("test", "2", 1, 1, VersionType.INTERNAL,
operations.add(new Translog.Index("test", "2", 1, primary.getPrimaryTerm(), 1, VersionType.INTERNAL,
"{\"foo\" : \"bar}".getBytes(Charset.forName("UTF-8")), null, -1));
numCorruptEntries++;
}
@ -2488,7 +2500,7 @@ public class IndexShardTests extends IndexShardTestCase {
.put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum", "fix")))
.build();
final IndexShard newShard = newShard(shardRouting, indexShard.shardPath(), indexMetaData,
null, indexShard.engineFactory, indexShard.getGlobalCheckpointSyncer());
null, indexShard.engineFactory, indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER);
Store.MetadataSnapshot storeFileMetaDatas = newShard.snapshotStoreMetadata();
assertTrue("at least 2 files, commit and data: " + storeFileMetaDatas.toString(), storeFileMetaDatas.size() > 1);
@ -2972,4 +2984,74 @@ public class IndexShardTests extends IndexShardTestCase {
breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING);
assertThat(breaker.getUsed(), equalTo(0L));
}
public void testFlushOnInactive() throws Exception {
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.build();
IndexMetaData metaData = IndexMetaData.builder("test")
.putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
.settings(settings)
.primaryTerm(0, 1).build();
ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(metaData.getIndex(), 0), "n1", true, ShardRoutingState
.INITIALIZING, RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE);
final ShardId shardId = shardRouting.shardId();
final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir());
ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
AtomicBoolean markedInactive = new AtomicBoolean();
AtomicReference<IndexShard> primaryRef = new AtomicReference<>();
IndexShard primary = newShard(shardRouting, shardPath, metaData, null, null, () -> {
}, new IndexEventListener() {
@Override
public void onShardInactive(IndexShard indexShard) {
markedInactive.set(true);
primaryRef.get().flush(new FlushRequest());
}
});
primaryRef.set(primary);
recoverShardFromStore(primary);
for (int i = 0; i < 3; i++) {
indexDoc(primary, "test", "" + i, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}");
primary.refresh("test"); // produce segments
}
List<Segment> segments = primary.segments(false);
Set<String> names = new HashSet<>();
for (Segment segment : segments) {
assertFalse(segment.committed);
assertTrue(segment.search);
names.add(segment.getName());
}
assertEquals(3, segments.size());
primary.flush(new FlushRequest());
primary.forceMerge(new ForceMergeRequest().maxNumSegments(1).flush(false));
primary.refresh("test");
segments = primary.segments(false);
for (Segment segment : segments) {
if (names.contains(segment.getName())) {
assertTrue(segment.committed);
assertFalse(segment.search);
} else {
assertFalse(segment.committed);
assertTrue(segment.search);
}
}
assertEquals(4, segments.size());
assertFalse(markedInactive.get());
assertBusy(() -> {
primary.checkIdle(0);
assertFalse(primary.isActive());
});
assertTrue(markedInactive.get());
segments = primary.segments(false);
assertEquals(1, segments.size());
for (Segment segment : segments) {
assertTrue(segment.committed);
assertTrue(segment.search);
}
closeShards(primary);
}
}

View File

@ -136,8 +136,8 @@ public class IndexingOperationListenerTests extends ESTestCase{
IndexingOperationListener.CompositeListener compositeListener =
new IndexingOperationListener.CompositeListener(indexingOperationListeners, logger);
ParsedDocument doc = InternalEngineTests.createParsedDoc("1", null);
Engine.Delete delete = new Engine.Delete("test", "1", new Term("_id", Uid.encodeId(doc.id())));
Engine.Index index = new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), doc);
Engine.Delete delete = new Engine.Delete("test", "1", new Term("_id", Uid.encodeId(doc.id())), randomNonNegativeLong());
Engine.Index index = new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), randomNonNegativeLong(), doc);
compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, SequenceNumbers.UNASSIGNED_SEQ_NO, true));
assertEquals(0, preIndex.get());
assertEquals(0, postIndex.get());

View File

@ -122,14 +122,15 @@ public class RefreshListenersTests extends ESTestCase {
}
};
store.createEmpty();
final long primaryTerm = randomNonNegativeLong();
final String translogUUID =
Translog.createEmptyTranslog(translogConfig.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
Translog.createEmptyTranslog(translogConfig.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm);
store.associateIndexWithNewTranslog(translogUUID);
EngineConfig config = new EngineConfig(shardId, allocationId, threadPool,
indexSettings, null, store, newMergePolicy(), iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger),
eventListener, IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig,
TimeValue.timeValueMinutes(5), Collections.singletonList(listeners), Collections.emptyList(), null,
(e, s) -> 0, new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED);
(e, s) -> 0, new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED, () -> primaryTerm);
engine = new InternalEngine(config);
engine.recoverFromTranslog();
listeners.setTranslog(engine.getTranslog());
@ -363,7 +364,7 @@ public class RefreshListenersTests extends ESTestCase {
BytesReference source = new BytesArray(new byte[] { 1 });
ParsedDocument doc = new ParsedDocument(versionField, seqID, id, "test", null, Arrays.asList(document), source, XContentType.JSON,
null);
Engine.Index index = new Engine.Index(new Term("_id", doc.id()), doc);
Engine.Index index = new Engine.Index(new Term("_id", doc.id()), engine.config().getPrimaryTermSupplier().getAsLong(), doc);
return engine.index(index);
}

View File

@ -83,26 +83,7 @@ public class TestTranslog {
int corruptions = RandomNumbers.randomIntBetween(random, 5, 20);
for (int i = 0; i < corruptions; i++) {
Path fileToCorrupt = RandomPicks.randomFrom(random, candidates);
try (FileChannel raf = FileChannel.open(fileToCorrupt, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
// read
raf.position(RandomNumbers.randomLongBetween(random, 0, raf.size() - 1));
long filePointer = raf.position();
ByteBuffer bb = ByteBuffer.wrap(new byte[1]);
raf.read(bb);
bb.flip();
// corrupt
byte oldValue = bb.get(0);
byte newValue = (byte) (oldValue + 1);
bb.put(0, newValue);
// rewrite
raf.position(filePointer);
raf.write(bb);
logger.info("--> corrupting file {} -- flipping at position {} from {} to {} file: {}",
fileToCorrupt, filePointer, Integer.toHexString(oldValue),
Integer.toHexString(newValue), fileToCorrupt);
}
corruptFile(logger, random, fileToCorrupt);
corruptedFiles.add(fileToCorrupt);
}
}
@ -110,6 +91,29 @@ public class TestTranslog {
return corruptedFiles;
}
static void corruptFile(Logger logger, Random random, Path fileToCorrupt) throws IOException {
try (FileChannel raf = FileChannel.open(fileToCorrupt, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
// read
raf.position(RandomNumbers.randomLongBetween(random, 0, raf.size() - 1));
long filePointer = raf.position();
ByteBuffer bb = ByteBuffer.wrap(new byte[1]);
raf.read(bb);
bb.flip();
// corrupt
byte oldValue = bb.get(0);
byte newValue = (byte) (oldValue + 1);
bb.put(0, newValue);
// rewrite
raf.position(filePointer);
raf.write(bb);
logger.info("--> corrupting file {} -- flipping at position {} from {} to {} file: {}",
fileToCorrupt, filePointer, Integer.toHexString(oldValue),
Integer.toHexString(newValue), fileToCorrupt);
}
}
/**
* Lists all existing commits in a given index path, then reads the minimum translog generation that will be used in recoverFromTranslog.
*/
@ -122,4 +126,11 @@ public class TestTranslog {
return Long.parseLong(recoveringCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY));
}
}
/**
* Returns the primary term associated with the current translog writer of the given translog.
*/
public static long getCurrentTerm(Translog translog) {
return translog.getCurrent().getPrimaryTerm();
}
}

View File

@ -171,7 +171,7 @@ public class TranslogDeletionPolicyTests extends ESTestCase {
}
writer = TranslogWriter.create(new ShardId("index", "uuid", 0), translogUUID, gen,
tempDir.resolve(Translog.getFilename(gen)), FileChannel::open, TranslogConfig.DEFAULT_BUFFER_SIZE, 1L, 1L, () -> 1L,
() -> 1L);
() -> 1L, randomNonNegativeLong());
writer = Mockito.spy(writer);
Mockito.doReturn(now - (numberOfReaders - gen + 1) * 1000).when(writer).getLastModifiedTime();

View File

@ -0,0 +1,128 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.translog;
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.OutputStreamDataOutput;
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.lessThan;
public class TranslogHeaderTests extends ESTestCase {
public void testCurrentHeaderVersion() throws Exception {
final String translogUUID = UUIDs.randomBase64UUID();
final TranslogHeader outHeader = new TranslogHeader(translogUUID, randomNonNegativeLong());
final long generation = randomNonNegativeLong();
final Path translogFile = createTempDir().resolve(Translog.getFilename(generation));
try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) {
outHeader.write(channel);
assertThat(outHeader.sizeInBytes(), equalTo((int)channel.position()));
}
try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) {
final TranslogHeader inHeader = TranslogHeader.read(translogUUID, translogFile, channel);
assertThat(inHeader.getTranslogUUID(), equalTo(translogUUID));
assertThat(inHeader.getPrimaryTerm(), equalTo(outHeader.getPrimaryTerm()));
assertThat(inHeader.sizeInBytes(), equalTo((int)channel.position()));
}
final TranslogCorruptedException mismatchUUID = expectThrows(TranslogCorruptedException.class, () -> {
try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) {
TranslogHeader.read(UUIDs.randomBase64UUID(), translogFile, channel);
}
});
assertThat(mismatchUUID.getMessage(), containsString("this translog file belongs to a different translog"));
int corruptions = between(1, 10);
for (int i = 0; i < corruptions; i++) {
TestTranslog.corruptFile(logger, random(), translogFile);
}
expectThrows(TranslogCorruptedException.class, () -> {
try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) {
TranslogHeader.read(outHeader.getTranslogUUID(), translogFile, channel);
}
});
}
public void testHeaderWithoutPrimaryTerm() throws Exception {
final String translogUUID = UUIDs.randomBase64UUID();
final long generation = randomNonNegativeLong();
final Path translogFile = createTempDir().resolve(Translog.getFilename(generation));
try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) {
writeHeaderWithoutTerm(channel, translogUUID);
assertThat((int)channel.position(), lessThan(TranslogHeader.headerSizeInBytes(translogUUID)));
}
try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) {
final TranslogHeader inHeader = TranslogHeader.read(translogUUID, translogFile, channel);
assertThat(inHeader.getTranslogUUID(), equalTo(translogUUID));
assertThat(inHeader.getPrimaryTerm(), equalTo(TranslogHeader.UNKNOWN_PRIMARY_TERM));
assertThat(inHeader.sizeInBytes(), equalTo((int)channel.position()));
}
expectThrows(TranslogCorruptedException.class, () -> {
try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) {
TranslogHeader.read(UUIDs.randomBase64UUID(), translogFile, channel);
}
});
}
static void writeHeaderWithoutTerm(FileChannel channel, String translogUUID) throws IOException {
final OutputStreamStreamOutput out = new OutputStreamStreamOutput(Channels.newOutputStream(channel));
CodecUtil.writeHeader(new OutputStreamDataOutput(out), TranslogHeader.TRANSLOG_CODEC, TranslogHeader.VERSION_CHECKPOINTS);
final BytesRef uuid = new BytesRef(translogUUID);
out.writeInt(uuid.length);
out.writeBytes(uuid.bytes, uuid.offset, uuid.length);
channel.force(true);
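// 43 = Lucene codec header (4-byte magic + "translog" codec string + 4-byte version = 17 bytes) + 4-byte uuid length + 22-byte base64 uuid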
assertThat(channel.position(), equalTo(43L));
}
public void testLegacyTranslogVersions() throws Exception {
checkFailsToOpen("/org/elasticsearch/index/translog/translog-v0.binary", IllegalStateException.class, "pre-1.4 translog");
checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1.binary", IllegalStateException.class, "pre-2.0 translog");
checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-truncated.binary", IllegalStateException.class, "pre-2.0 translog");
checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-corrupted-magic.binary",
TranslogCorruptedException.class, "translog looks like version 1 or later, but has corrupted header");
checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-corrupted-body.binary",
IllegalStateException.class, "pre-2.0 translog");
}
private <E extends Exception> void checkFailsToOpen(String file, Class<E> expectedErrorType, String expectedMessage) {
final Path translogFile = getDataPath(file);
assertThat("test file [" + translogFile + "] should exist", Files.exists(translogFile), equalTo(true));
final E error = expectThrows(expectedErrorType, () -> {
final Checkpoint checkpoint = new Checkpoint(Files.size(translogFile), 1, 1,
SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED, 1);
try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) {
TranslogReader.open(channel, translogFile, checkpoint, null);
}
});
assertThat(error.getMessage(), containsString(expectedMessage));
}
}
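
To see how the pieces above fit together, a minimal sketch that writes an empty translog carrying a given primary term and reads the term back from the file header (a hypothetical helper, placed in the org.elasticsearch.index.translog package so it can reach the package-private TranslogHeader; that an empty translog starts at generation 1 is an assumption):

package org.elasticsearch.index.translog;

import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class TranslogTermRoundTrip {
    // Create an empty translog with the given primary term, then read the term back from the header.
    static long roundTrip(Path translogDir, ShardId shardId, long primaryTerm) throws IOException {
        final String uuid = Translog.createEmptyTranslog(translogDir, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm);
        final Path firstGeneration = translogDir.resolve(Translog.getFilename(1));
        try (FileChannel channel = FileChannel.open(firstGeneration, StandardOpenOption.READ)) {
            return TranslogHeader.read(uuid, firstGeneration, channel).getPrimaryTerm();
        }
    }
}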

View File

@ -110,6 +110,7 @@ import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.LongStream;
import java.util.stream.Stream;
import static org.elasticsearch.common.util.BigArrays.NON_RECYCLING_INSTANCE;
import static org.elasticsearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder;
@ -133,6 +134,8 @@ public class TranslogTests extends ESTestCase {
protected Translog translog;
private AtomicLong globalCheckpoint;
protected Path translogDir;
// A default primary term is used by translog instances created in this test.
private final AtomicLong primaryTerm = new AtomicLong();
@Override
protected void afterIfSuccessful() throws Exception {
@ -153,14 +156,14 @@ public class TranslogTests extends ESTestCase {
protected Translog createTranslog(TranslogConfig config) throws IOException {
String translogUUID =
Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
return new Translog(config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()),
() -> SequenceNumbers.NO_OPS_PERFORMED);
() -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
}
protected Translog openTranslog(TranslogConfig config, String translogUUID) throws IOException {
return new Translog(config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()),
() -> SequenceNumbers.NO_OPS_PERFORMED);
() -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
}
@ -189,6 +192,7 @@ public class TranslogTests extends ESTestCase {
@Before
public void setUp() throws Exception {
super.setUp();
primaryTerm.set(randomLongBetween(1, Integer.MAX_VALUE));
// if a previous test failed we clean up things here
translogDir = createTempDir();
translog = create(translogDir);
@ -209,8 +213,8 @@ public class TranslogTests extends ESTestCase {
globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
final TranslogConfig translogConfig = getTranslogConfig(path);
final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(translogConfig.getIndexSettings());
final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId);
return new Translog(translogConfig, translogUUID, deletionPolicy, () -> globalCheckpoint.get());
final String translogUUID = Translog.createEmptyTranslog(path, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
return new Translog(translogConfig, translogUUID, deletionPolicy, () -> globalCheckpoint.get(), primaryTerm::get);
}
private TranslogConfig getTranslogConfig(final Path path) {
@ -304,22 +308,22 @@ public class TranslogTests extends ESTestCase {
assertThat(snapshot, SnapshotMatchers.size(0));
}
addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, new byte[]{1}));
addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1}));
try (Translog.Snapshot snapshot = translog.newSnapshot()) {
assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
assertThat(snapshot.totalOperations(), equalTo(ops.size()));
}
addToTranslogAndList(translog, ops, new Translog.Delete("test", "2", 1, newUid("2")));
addToTranslogAndList(translog, ops, new Translog.Delete("test", "2", 1, primaryTerm.get(), newUid("2")));
try (Translog.Snapshot snapshot = translog.newSnapshot()) {
assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
assertThat(snapshot.totalOperations(), equalTo(ops.size()));
}
final long seqNo = randomNonNegativeLong();
final long primaryTerm = randomNonNegativeLong();
final String reason = randomAlphaOfLength(16);
addToTranslogAndList(translog, ops, new Translog.NoOp(seqNo, primaryTerm, reason));
final long noopTerm = randomLongBetween(1, primaryTerm.get());
addToTranslogAndList(translog, ops, new Translog.NoOp(seqNo, noopTerm, reason));
try (Translog.Snapshot snapshot = translog.newSnapshot()) {
@ -334,7 +338,7 @@ public class TranslogTests extends ESTestCase {
Translog.NoOp noOp = (Translog.NoOp) snapshot.next();
assertNotNull(noOp);
assertThat(noOp.seqNo(), equalTo(seqNo));
assertThat(noOp.primaryTerm(), equalTo(primaryTerm));
assertThat(noOp.primaryTerm(), equalTo(noopTerm));
assertThat(noOp.reason(), equalTo(reason));
assertNull(snapshot.next());
@ -402,35 +406,35 @@ public class TranslogTests extends ESTestCase {
final TranslogStats stats = stats();
assertThat(stats.estimatedNumberOfOperations(), equalTo(0));
}
assertThat((int) firstOperationPosition, greaterThan(CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC)));
translog.add(new Translog.Index("test", "1", 0, new byte[]{1}));
assertThat((int) firstOperationPosition, greaterThan(CodecUtil.headerLength(TranslogHeader.TRANSLOG_CODEC)));
translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1}));
{
final TranslogStats stats = stats();
assertThat(stats.estimatedNumberOfOperations(), equalTo(1));
assertThat(stats.getTranslogSizeInBytes(), equalTo(139L));
assertThat(stats.getTranslogSizeInBytes(), equalTo(163L));
assertThat(stats.getUncommittedOperations(), equalTo(1));
assertThat(stats.getUncommittedSizeInBytes(), equalTo(139L));
assertThat(stats.getUncommittedSizeInBytes(), equalTo(163L));
assertThat(stats.getEarliestLastModifiedAge(), greaterThan(1L));
}
translog.add(new Translog.Delete("test", "2", 1, newUid("2")));
translog.add(new Translog.Delete("test", "2", 1, primaryTerm.get(), newUid("2")));
{
final TranslogStats stats = stats();
assertThat(stats.estimatedNumberOfOperations(), equalTo(2));
assertThat(stats.getTranslogSizeInBytes(), equalTo(188L));
assertThat(stats.getTranslogSizeInBytes(), equalTo(212L));
assertThat(stats.getUncommittedOperations(), equalTo(2));
assertThat(stats.getUncommittedSizeInBytes(), equalTo(188L));
assertThat(stats.getUncommittedSizeInBytes(), equalTo(212L));
assertThat(stats.getEarliestLastModifiedAge(), greaterThan(1L));
}
translog.add(new Translog.Delete("test", "3", 2, newUid("3")));
translog.add(new Translog.Delete("test", "3", 2, primaryTerm.get(), newUid("3")));
{
final TranslogStats stats = stats();
assertThat(stats.estimatedNumberOfOperations(), equalTo(3));
assertThat(stats.getTranslogSizeInBytes(), equalTo(237L));
assertThat(stats.getTranslogSizeInBytes(), equalTo(261L));
assertThat(stats.getUncommittedOperations(), equalTo(3));
assertThat(stats.getUncommittedSizeInBytes(), equalTo(237L));
assertThat(stats.getUncommittedSizeInBytes(), equalTo(261L));
assertThat(stats.getEarliestLastModifiedAge(), greaterThan(1L));
}
@ -438,13 +442,13 @@ public class TranslogTests extends ESTestCase {
{
final TranslogStats stats = stats();
assertThat(stats.estimatedNumberOfOperations(), equalTo(4));
assertThat(stats.getTranslogSizeInBytes(), equalTo(279L));
assertThat(stats.getTranslogSizeInBytes(), equalTo(303L));
assertThat(stats.getUncommittedOperations(), equalTo(4));
assertThat(stats.getUncommittedSizeInBytes(), equalTo(279L));
assertThat(stats.getUncommittedSizeInBytes(), equalTo(303L));
assertThat(stats.getEarliestLastModifiedAge(), greaterThan(1L));
}
final long expectedSizeInBytes = 322L;
final long expectedSizeInBytes = 358L;
translog.rollGeneration();
{
final TranslogStats stats = stats();
@ -495,7 +499,7 @@ public class TranslogTests extends ESTestCase {
int uncommittedOps = 0;
int operationsInLastGen = 0;
for (int i = 0; i < operations; i++) {
translog.add(new Translog.Index("test", Integer.toString(i), i, new byte[]{1}));
translog.add(new Translog.Index("test", Integer.toString(i), i, primaryTerm.get(), new byte[]{1}));
uncommittedOps++;
operationsInLastGen++;
if (rarely()) {
@ -564,7 +568,7 @@ public class TranslogTests extends ESTestCase {
assertThat(snapshot, SnapshotMatchers.size(0));
}
addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, new byte[]{1}));
addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1}));
try (Translog.Snapshot snapshot = translog.newSnapshot()) {
assertThat(snapshot, SnapshotMatchers.equalsTo(ops));
@ -584,9 +588,9 @@ public class TranslogTests extends ESTestCase {
public void testReadLocation() throws IOException {
ArrayList<Translog.Operation> ops = new ArrayList<>();
ArrayList<Translog.Location> locs = new ArrayList<>();
locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, new byte[]{1})));
locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "2", 1, new byte[]{1})));
locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "3", 2, new byte[]{1})));
locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1})));
locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "2", 1, primaryTerm.get(), new byte[]{1})));
locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "3", 2, primaryTerm.get(), new byte[]{1})));
int i = 0;
for (Translog.Operation op : ops) {
assertEquals(op, translog.readOperation(locs.get(i++)));
@ -602,16 +606,16 @@ public class TranslogTests extends ESTestCase {
toClose.add(snapshot);
assertThat(snapshot, SnapshotMatchers.size(0));
addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, new byte[]{1}));
addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1}));
Translog.Snapshot snapshot1 = translog.newSnapshot();
toClose.add(snapshot1);
addToTranslogAndList(translog, ops, new Translog.Index("test", "2", 1, new byte[]{2}));
addToTranslogAndList(translog, ops, new Translog.Index("test", "2", 1, primaryTerm.get(), new byte[]{2}));
assertThat(snapshot1, SnapshotMatchers.equalsTo(ops.get(0)));
translog.rollGeneration();
addToTranslogAndList(translog, ops, new Translog.Index("test", "3", 2, new byte[]{3}));
addToTranslogAndList(translog, ops, new Translog.Index("test", "3", 2, primaryTerm.get(), new byte[]{3}));
Translog.Snapshot snapshot2 = translog.newSnapshot();
toClose.add(snapshot2);
@ -625,7 +629,7 @@ public class TranslogTests extends ESTestCase {
public void testSnapshotOnClosedTranslog() throws IOException {
assertTrue(Files.exists(translogDir.resolve(Translog.getFilename(1))));
translog.add(new Translog.Index("test", "1", 0, new byte[]{1}));
translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1}));
translog.close();
try {
Translog.Snapshot snapshot = translog.newSnapshot();
@ -748,7 +752,7 @@ public class TranslogTests extends ESTestCase {
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations; op++) {
String ascii = randomAlphaOfLengthBetween(1, 50);
locations.add(translog.add(new Translog.Index("test", "" + op, op, ascii.getBytes("UTF-8"))));
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8"))));
}
translog.sync();
@ -775,7 +779,7 @@ public class TranslogTests extends ESTestCase {
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations; op++) {
String ascii = randomAlphaOfLengthBetween(1, 50);
locations.add(translog.add(new Translog.Index("test", "" + op, op, ascii.getBytes("UTF-8"))));
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8"))));
}
translog.sync();
@ -839,7 +843,7 @@ public class TranslogTests extends ESTestCase {
public void testVerifyTranslogIsNotDeleted() throws IOException {
assertFileIsPresent(translog, 1);
translog.add(new Translog.Index("test", "1", 0, new byte[]{1}));
translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1}));
try (Translog.Snapshot snapshot = translog.newSnapshot()) {
assertThat(snapshot, SnapshotMatchers.size(1));
assertFileIsPresent(translog, 1);
@ -891,10 +895,10 @@ public class TranslogTests extends ESTestCase {
switch (type) {
case CREATE:
case INDEX:
op = new Translog.Index("type", "" + id, id, new byte[]{(byte) id});
op = new Translog.Index("type", "" + id, id, primaryTerm.get(), new byte[]{(byte) id});
break;
case DELETE:
op = new Translog.Delete("test", Long.toString(id), id, newUid(Long.toString(id)));
op = new Translog.Delete("test", Long.toString(id), id, primaryTerm.get(), newUid(Long.toString(id)));
break;
case NO_OP:
op = new Translog.NoOp(id, 1, Long.toString(id));
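Each operation type now carries the primary term in the same position, immediately after the sequence number. A minimal sketch of the three constructor shapes as used in these tests (ids, sequence numbers, and payloads are illustrative):

    long term = primaryTerm.get();
    Translog.Operation index = new Translog.Index("type", "1", 0 /* seqNo */, term, new byte[]{1});
    Translog.Operation delete = new Translog.Delete("test", "1", 0 /* seqNo */, term, newUid("1"));
    Translog.Operation noOp = new Translog.NoOp(0 /* seqNo */, term, "reason");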
@ -1053,13 +1057,13 @@ public class TranslogTests extends ESTestCase {
for (int op = 0; op < translogOperations; op++) {
int seqNo = ++count;
final Translog.Location location =
translog.add(new Translog.Index("test", "" + op, seqNo, Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))));
if (randomBoolean()) {
assertTrue("at least one operation pending", translog.syncNeeded());
assertTrue("this operation has not been synced", translog.ensureSynced(location));
assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced
seqNo = ++count;
translog.add(new Translog.Index("test", "" + op, seqNo, Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8"))));
assertTrue("one pending operation", translog.syncNeeded());
assertFalse("this op has been synced before", translog.ensureSynced(location)); // not syncing now
assertTrue("we only synced a previous operation yet", translog.syncNeeded());
@ -1088,7 +1092,7 @@ public class TranslogTests extends ESTestCase {
rollAndCommit(translog); // do this first so that there is at least one pending tlog entry
}
final Translog.Location location =
translog.add(new Translog.Index("test", "" + op, op, Integer.toString(++count).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8"))));
locations.add(location);
}
Collections.shuffle(locations, random());
@ -1116,7 +1120,7 @@ public class TranslogTests extends ESTestCase {
int count = 0;
for (int op = 0; op < translogOperations; op++) {
locations.add(
translog.add(new Translog.Index("test", "" + op, op, Integer.toString(++count).getBytes(Charset.forName("UTF-8")))));
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8")))));
if (rarely() && translogOperations > op + 1) {
rollAndCommit(translog);
}
@ -1153,7 +1157,7 @@ public class TranslogTests extends ESTestCase {
int lastSynced = -1;
long lastSyncedGlobalCheckpoint = globalCheckpoint.get();
for (int op = 0; op < translogOperations; op++) {
locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
if (randomBoolean()) {
globalCheckpoint.set(globalCheckpoint.get() + randomIntBetween(1, 16));
}
@ -1164,8 +1168,8 @@ public class TranslogTests extends ESTestCase {
}
}
assertEquals(translogOperations, translog.totalOperations());
translog.add(new Translog.Index(
"test", "" + translogOperations, translogOperations, Integer.toString(translogOperations).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + translogOperations, translogOperations, primaryTerm.get(),
Integer.toString(translogOperations).getBytes(Charset.forName("UTF-8"))));
final Checkpoint checkpoint = Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME));
try (TranslogReader reader = translog.openReader(translog.location().resolve(Translog.getFilename(translog.currentFileGeneration())), checkpoint)) {
@ -1289,7 +1293,7 @@ public class TranslogTests extends ESTestCase {
int minUncommittedOp = -1;
final boolean commitOften = randomBoolean();
for (int op = 0; op < translogOperations; op++) {
locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
final boolean commit = commitOften ? frequently() : rarely();
if (commit && op < translogOperations - 1) {
rollAndCommit(translog);
@ -1310,7 +1314,7 @@ public class TranslogTests extends ESTestCase {
assertNull(snapshot.next());
}
} else {
translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED);
translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration());
assertFalse(translog.syncNeeded());
try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) {
@ -1332,7 +1336,7 @@ public class TranslogTests extends ESTestCase {
Translog.TranslogGeneration translogGeneration = null;
final boolean sync = randomBoolean();
for (int op = 0; op < translogOperations; op++) {
locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
if (op == prepareOp) {
translogGeneration = translog.getGeneration();
translog.rollGeneration();
@ -1349,7 +1353,7 @@ public class TranslogTests extends ESTestCase {
TranslogConfig config = translog.getConfig();
final String translogUUID = translog.getTranslogUUID();
final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertNotNull(translogGeneration);
assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
assertFalse(translog.syncNeeded());
@ -1363,7 +1367,7 @@ public class TranslogTests extends ESTestCase {
}
}
if (randomBoolean()) { // recover twice
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertNotNull(translogGeneration);
assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice",
translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration());
@ -1388,7 +1392,7 @@ public class TranslogTests extends ESTestCase {
Translog.TranslogGeneration translogGeneration = null;
final boolean sync = randomBoolean();
for (int op = 0; op < translogOperations; op++) {
locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
if (op == prepareOp) {
translogGeneration = translog.getGeneration();
translog.rollGeneration();
@ -1409,7 +1413,7 @@ public class TranslogTests extends ESTestCase {
final String translogUUID = translog.getTranslogUUID();
final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertNotNull(translogGeneration);
assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
assertFalse(translog.syncNeeded());
@ -1424,7 +1428,7 @@ public class TranslogTests extends ESTestCase {
}
if (randomBoolean()) { // recover twice
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertNotNull(translogGeneration);
assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice",
translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration());
@ -1448,7 +1452,7 @@ public class TranslogTests extends ESTestCase {
Translog.TranslogGeneration translogGeneration = null;
final boolean sync = randomBoolean();
for (int op = 0; op < translogOperations; op++) {
locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
if (op == prepareOp) {
translogGeneration = translog.getGeneration();
translog.rollGeneration();
@ -1467,15 +1471,15 @@ public class TranslogTests extends ESTestCase {
Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), corrupted, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
final String translogUUID = translog.getTranslogUUID();
final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
fail("corrupted");
} catch (IllegalStateException ex) {
assertEquals("Checkpoint file translog-3.ckp already exists but has corrupted content expected: Checkpoint{offset=3068, " +
assertEquals("Checkpoint file translog-3.ckp already exists but has corrupted content expected: Checkpoint{offset=3080, " +
"numOps=55, generation=3, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-1, minTranslogGeneration=1} but got: Checkpoint{offset=0, numOps=0, " +
"generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-1, minTranslogGeneration=0}", ex.getMessage());
}
Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING);
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertNotNull(translogGeneration);
assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration());
assertFalse(translog.syncNeeded());
@ -1495,7 +1499,7 @@ public class TranslogTests extends ESTestCase {
List<Translog.Operation> ops = new ArrayList<>();
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations; op++) {
Translog.Index test = new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")));
Translog.Index test = new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")));
ops.add(test);
}
Translog.writeOperations(out, ops);
@ -1510,8 +1514,8 @@ public class TranslogTests extends ESTestCase {
int translogOperations = randomIntBetween(10, 100);
try (Translog translog2 = create(createTempDir())) {
for (int op = 0; op < translogOperations; op++) {
locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations2.add(translog2.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations2.add(translog2.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
}
int iters = randomIntBetween(10, 100);
for (int i = 0; i < iters; i++) {
@ -1537,7 +1541,7 @@ public class TranslogTests extends ESTestCase {
int translogOperations = randomIntBetween(1, 10);
int firstUncommitted = 0;
for (int op = 0; op < translogOperations; op++) {
locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))));
if (randomBoolean()) {
rollAndCommit(translog);
firstUncommitted = op + 1;
@ -1552,12 +1556,12 @@ public class TranslogTests extends ESTestCase {
final String foreignTranslog = randomRealisticUnicodeOfCodepointLengthBetween(1,
translogGeneration.translogUUID.length());
try {
new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED);
new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
fail("translog doesn't belong to this UUID");
} catch (TranslogCorruptedException ex) {
}
this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
try (Translog.Snapshot snapshot = this.translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) {
for (int i = firstUncommitted; i < translogOperations; i++) {
Translog.Operation next = snapshot.next();
@ -1569,10 +1573,10 @@ public class TranslogTests extends ESTestCase {
}
public void testFailOnClosedWrite() throws IOException {
translog.add(new Translog.Index("test", "1", 0, Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
translog.close();
try {
translog.add(new Translog.Index("test", "1", 0, Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
fail("closed");
} catch (AlreadyClosedException ex) {
// all is well
@ -1610,7 +1614,7 @@ public class TranslogTests extends ESTestCase {
}
}
private static class TranslogThread extends Thread {
private class TranslogThread extends Thread {
private final CountDownLatch downLatch;
private final int opsPerThread;
private final int threadId;
@ -1641,19 +1645,19 @@ public class TranslogTests extends ESTestCase {
case CREATE:
case INDEX:
op = new Translog.Index("test", threadId + "_" + opCount, seqNoGenerator.getAndIncrement(),
randomUnicodeOfLengthBetween(1, 20 * 1024).getBytes("UTF-8"));
primaryTerm.get(), randomUnicodeOfLengthBetween(1, 20 * 1024).getBytes("UTF-8"));
break;
case DELETE:
op = new Translog.Delete(
"test", threadId + "_" + opCount,
new Term("_uid", threadId + "_" + opCount),
seqNoGenerator.getAndIncrement(),
0,
primaryTerm.get(),
1 + randomInt(100000),
randomFrom(VersionType.values()));
break;
case NO_OP:
op = new Translog.NoOp(seqNoGenerator.getAndIncrement(), randomNonNegativeLong(), randomAlphaOfLength(16));
op = new Translog.NoOp(seqNoGenerator.getAndIncrement(), primaryTerm.get(), randomAlphaOfLength(16));
break;
default:
throw new AssertionError("unsupported operation type [" + type + "]");
@ -1691,7 +1695,7 @@ public class TranslogTests extends ESTestCase {
while (failed == false) {
try {
locations.add(translog.add(
new Translog.Index("test", "" + opsSynced, opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(), Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
translog.sync();
opsSynced++;
} catch (MockDirectoryWrapper.FakeIOException ex) {
@ -1712,7 +1716,7 @@ public class TranslogTests extends ESTestCase {
if (randomBoolean()) {
try {
locations.add(translog.add(
new Translog.Index("test", "" + opsSynced, opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(), Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8")))));
fail("we are already closed");
} catch (AlreadyClosedException ex) {
assertNotNull(ex.getCause());
@ -1746,7 +1750,7 @@ public class TranslogTests extends ESTestCase {
translog.close(); // we are closed
final String translogUUID = translog.getTranslogUUID();
final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy();
try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration());
assertFalse(tlog.syncNeeded());
@ -1769,7 +1773,7 @@ public class TranslogTests extends ESTestCase {
LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer borders regularly
for (int opsAdded = 0; opsAdded < numOps; opsAdded++) {
locations.add(translog.add(
new Translog.Index("test", "" + opsAdded, opsAdded, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))));
new Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))));
try (Translog.Snapshot snapshot = this.translog.newSnapshot()) {
assertEquals(opsAdded + 1, snapshot.totalOperations());
for (int i = 0; i < opsAdded; i++) {
@ -1788,11 +1792,11 @@ public class TranslogTests extends ESTestCase {
TranslogConfig config = getTranslogConfig(tempDir);
Translog translog = getFailableTranslog(fail, config, false, true, null, createTranslogDeletionPolicy());
LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer borders regularly
translog.add(new Translog.Index("test", "1", 0, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
fail.failAlways();
try {
Translog.Location location = translog.add(
new Translog.Index("test", "2", 1, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
new Translog.Index("test", "2", 1, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))));
if (randomBoolean()) {
translog.ensureSynced(location);
} else {
@ -1882,7 +1886,7 @@ public class TranslogTests extends ESTestCase {
}
}
try (Translog tlog =
new Translog(config, translogUUID, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED);
new Translog(config, translogUUID, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
Translog.Snapshot snapshot = tlog.newSnapshot()) {
if (writtenOperations.size() != snapshot.totalOperations()) {
for (int i = 0; i < threadCount; i++) {
@ -1909,7 +1913,7 @@ public class TranslogTests extends ESTestCase {
public void testRecoveryFromAFutureGenerationCleansUp() throws IOException {
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations / 2; op++) {
translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
if (rarely()) {
translog.rollGeneration();
}
@ -1917,7 +1921,7 @@ public class TranslogTests extends ESTestCase {
translog.rollGeneration();
long comittedGeneration = randomLongBetween(2, translog.currentFileGeneration());
for (int op = translogOperations / 2; op < translogOperations; op++) {
translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
if (rarely()) {
translog.rollGeneration();
}
@ -1928,7 +1932,7 @@ public class TranslogTests extends ESTestCase {
final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1);
deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE));
deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration);
translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
assertThat(translog.getMinFileGeneration(), equalTo(1L));
// no trimming done yet, just recovered
for (long gen = 1; gen < translog.currentFileGeneration(); gen++) {
@ -1959,7 +1963,7 @@ public class TranslogTests extends ESTestCase {
translogUUID = translog.getTranslogUUID();
int translogOperations = randomIntBetween(10, 100);
for (int op = 0; op < translogOperations / 2; op++) {
translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
if (rarely()) {
translog.rollGeneration();
}
@ -1967,7 +1971,7 @@ public class TranslogTests extends ESTestCase {
translog.rollGeneration();
comittedGeneration = randomLongBetween(2, translog.currentFileGeneration());
for (int op = translogOperations / 2; op < translogOperations; op++) {
translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))));
if (rarely()) {
translog.rollGeneration();
}
@ -1984,7 +1988,7 @@ public class TranslogTests extends ESTestCase {
final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1);
deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE));
deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration);
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
// we don't know when things broke exactly
assertThat(translog.getMinFileGeneration(), greaterThanOrEqualTo(1L));
assertThat(translog.getMinFileGeneration(), lessThanOrEqualTo(comittedGeneration));
@ -2048,9 +2052,9 @@ public class TranslogTests extends ESTestCase {
};
if (translogUUID == null) {
translogUUID = Translog.createEmptyTranslog(
config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, channelFactory);
config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, channelFactory, primaryTerm.get());
}
return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED) {
return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get) {
@Override
ChannelFactory getChannelFactory() {
return channelFactory;
@ -2158,10 +2162,10 @@ public class TranslogTests extends ESTestCase {
Path tempDir = createTempDir();
TranslogConfig config = getTranslogConfig(tempDir);
Translog translog = createTranslog(config);
translog.add(new Translog.Index("test", "boom", 0, "boom".getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "boom", 0, primaryTerm.get(), "boom".getBytes(Charset.forName("UTF-8"))));
translog.close();
try {
new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED) {
new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get) {
@Override
protected TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, long initialGlobalCheckpoint)
throws IOException {
@ -2176,7 +2180,7 @@ public class TranslogTests extends ESTestCase {
}
public void testRecoverWithUnbackedNextGen() throws IOException {
translog.add(new Translog.Index("test", "" + 0, 0, Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
translog.close();
TranslogConfig config = translog.getConfig();
@ -2192,7 +2196,7 @@ public class TranslogTests extends ESTestCase {
assertNotNull("operation 1 must be non-null", op);
assertEquals("payload mismatch for operation 1", 1, Integer.parseInt(op.getSource().source.utf8ToString()));
tlog.add(new Translog.Index("test", "" + 1, 1, Integer.toString(2).getBytes(Charset.forName("UTF-8"))));
tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(), Integer.toString(2).getBytes(Charset.forName("UTF-8"))));
}
try (Translog tlog = openTranslog(config, translog.getTranslogUUID());
@ -2210,7 +2214,7 @@ public class TranslogTests extends ESTestCase {
}
public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException {
translog.add(new Translog.Index("test", "" + 0, 0, Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
translog.close();
TranslogConfig config = translog.getConfig();
Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME);
@ -2219,7 +2223,7 @@ public class TranslogTests extends ESTestCase {
Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));
try {
Translog tlog = new Translog(config, translog.getTranslogUUID(), translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED);
Translog tlog = new Translog(config, translog.getTranslogUUID(), translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
fail("file already exists?");
} catch (TranslogException ex) {
// all is well
@ -2229,7 +2233,7 @@ public class TranslogTests extends ESTestCase {
}
public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException {
translog.add(new Translog.Index("test", "" + 0, 0, Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), Integer.toString(0).getBytes(Charset.forName("UTF-8"))));
translog.close();
TranslogConfig config = translog.getConfig();
final String translogUUID = translog.getTranslogUUID();
@ -2241,7 +2245,7 @@ public class TranslogTests extends ESTestCase {
Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog"));
// we add N+1 and N+2 to ensure we only delete the N+1 file and never jump ahead and wipe without the right condition
Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 2) + ".tlog"));
try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) {
try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
assertFalse(tlog.syncNeeded());
try (Translog.Snapshot snapshot = tlog.newSnapshot()) {
for (int i = 0; i < 1; i++) {
@ -2250,11 +2254,11 @@ public class TranslogTests extends ESTestCase {
assertEquals("payload missmatch", i, Integer.parseInt(next.getSource().source.utf8ToString()));
}
}
tlog.add(new Translog.Index("test", "" + 1, 1, Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8"))));
}
try {
Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
fail("file already exists?");
} catch (TranslogException ex) {
// all is well
@ -2290,7 +2294,7 @@ public class TranslogTests extends ESTestCase {
LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer borders regularly
for (int opsAdded = 0; opsAdded < numOps; opsAdded++) {
String doc = lineFileDocs.nextDoc().toString();
failableTLog.add(new Translog.Index("test", "" + opsAdded, opsAdded, doc.getBytes(Charset.forName("UTF-8"))));
failableTLog.add(new Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(), doc.getBytes(Charset.forName("UTF-8"))));
unsynced.add(doc);
if (randomBoolean()) {
failableTLog.sync();
@ -2362,9 +2366,9 @@ public class TranslogTests extends ESTestCase {
deletionPolicy.setMinTranslogGenerationForRecovery(minGenForRecovery);
if (generationUUID == null) {
// we never managed to successfully create a translog, make it
generationUUID = Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
generationUUID = Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
}
try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
Translog.Snapshot snapshot = translog.newSnapshotFromGen(minGenForRecovery)) {
assertEquals(syncedDocs.size(), snapshot.totalOperations());
for (int i = 0; i < syncedDocs.size(); i++) {
@ -2423,20 +2427,20 @@ public class TranslogTests extends ESTestCase {
* Tests that closing views after the translog is fine and we can reopen the translog
*/
public void testPendingDelete() throws IOException {
translog.add(new Translog.Index("test", "1", 0, new byte[]{1}));
translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1}));
translog.rollGeneration();
TranslogConfig config = translog.getConfig();
final String translogUUID = translog.getTranslogUUID();
final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(config.getIndexSettings());
translog.close();
translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
translog.add(new Translog.Index("test", "2", 1, new byte[]{2}));
translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
translog.add(new Translog.Index("test", "2", 1, primaryTerm.get(), new byte[]{2}));
translog.rollGeneration();
Closeable lock = translog.acquireRetentionLock();
translog.add(new Translog.Index("test", "3", 2, new byte[]{3}));
translog.add(new Translog.Index("test", "3", 2, primaryTerm.get(), new byte[]{3}));
translog.close();
IOUtils.close(lock);
translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED);
translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
}
public static Translog.Location randomTranslogLocation() {
@ -2501,20 +2505,31 @@ public class TranslogTests extends ESTestCase {
final int rolls = randomIntBetween(1, 16);
int totalOperations = 0;
int seqNo = 0;
final List<Long> primaryTerms = new ArrayList<>();
primaryTerms.add(primaryTerm.get()); // We always create an empty translog.
primaryTerms.add(primaryTerm.get());
for (int i = 0; i < rolls; i++) {
final int operations = randomIntBetween(1, 128);
for (int j = 0; j < operations; j++) {
translog.add(new Translog.NoOp(seqNo++, 0, "test"));
translog.add(new Translog.NoOp(seqNo++, primaryTerm.get(), "test"));
totalOperations++;
}
try (ReleasableLock ignored = translog.writeLock.acquire()) {
if (randomBoolean()){
primaryTerm.incrementAndGet();
}
translog.rollGeneration();
primaryTerms.add(primaryTerm.get());
}
assertThat(translog.currentFileGeneration(), equalTo(generation + i + 1));
assertThat(translog.getCurrent().getPrimaryTerm(), equalTo(primaryTerm.get()));
assertThat(translog.totalOperations(), equalTo(totalOperations));
}
for (int i = 0; i <= rolls; i++) {
assertFileIsPresent(translog, generation + i);
final List<Long> storedPrimaryTerms = Stream.concat(translog.getReaders().stream(), Stream.of(translog.getCurrent()))
.map(t -> t.getPrimaryTerm()).collect(Collectors.toList());
assertThat(storedPrimaryTerms, equalTo(primaryTerms));
}
long minGenForRecovery = randomLongBetween(generation, generation + rolls);
commit(translog, minGenForRecovery, generation + rolls);
@ -2649,8 +2664,11 @@ public class TranslogTests extends ESTestCase {
final int operations = randomIntBetween(1, 4096);
long seqNo = 0;
for (int i = 0; i < operations; i++) {
translog.add(new Translog.NoOp(seqNo++, 0, "test'"));
translog.add(new Translog.NoOp(seqNo++, primaryTerm.get(), "test'"));
if (rarely()) {
if (rarely()) {
primaryTerm.incrementAndGet();
}
translog.rollGeneration();
}
}
@ -2707,7 +2725,7 @@ public class TranslogTests extends ESTestCase {
for (int gen = 0; gen < generations; gen++) {
final int operations = randomIntBetween(1, 100);
for (int i = 0; i < operations; i++) {
Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo.getAndIncrement(), new byte[]{1});
Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo.getAndIncrement(), primaryTerm.get(), new byte[]{1});
translog.add(op);
views.peek().add(op);
}
@ -2732,7 +2750,7 @@ public class TranslogTests extends ESTestCase {
List<Long> batch = LongStream.rangeClosed(0, between(0, 500)).boxed().collect(Collectors.toList());
Randomness.shuffle(batch);
for (Long seqNo : batch) {
Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo, new byte[]{1});
Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[]{1});
translog.add(op);
latestOperations.put(op.seqNo(), op);
}
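Taken together, the recurring edits in this file give the new lifecycle of a term-aware translog: create the empty translog under the current term, then open it with a supplier so later generations pick up term bumps. A condensed sketch using the fixtures seen above (primaryTerm is the test's AtomicLong holder):

    String translogUUID = Translog.createEmptyTranslog(
            config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
    try (Translog translog = new Translog(config, translogUUID, createTranslogDeletionPolicy(),
            () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) {
        // every operation records the term of the primary that issued it
        translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1}));
    }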


@ -1,96 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.translog;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
/**
* Tests for reading old and new translog files
*/
public class TranslogVersionTests extends ESTestCase {
private void checkFailsToOpen(String file, String expectedMessage) throws IOException {
Path translogFile = getDataPath(file);
assertThat("test file should exist", Files.exists(translogFile), equalTo(true));
try {
openReader(translogFile, 0);
fail("should be able to open an old translog");
} catch (IllegalStateException e) {
assertThat(e.getMessage(), containsString(expectedMessage));
}
}
public void testV0LegacyTranslogVersion() throws Exception {
checkFailsToOpen("/org/elasticsearch/index/translog/translog-v0.binary", "pre-1.4 translog");
}
public void testV1ChecksummedTranslogVersion() throws Exception {
checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1.binary", "pre-2.0 translog");
}
public void testCorruptedTranslogs() throws Exception {
try {
Path translogFile = getDataPath("/org/elasticsearch/index/translog/translog-v1-corrupted-magic.binary");
assertThat("test file should exist", Files.exists(translogFile), equalTo(true));
openReader(translogFile, 0);
fail("should have thrown an exception about the header being corrupt");
} catch (TranslogCorruptedException e) {
assertThat("translog corruption from header: " + e.getMessage(),
e.getMessage().contains("translog looks like version 1 or later, but has corrupted header"), equalTo(true));
}
try {
Path translogFile = getDataPath("/org/elasticsearch/index/translog/translog-invalid-first-byte.binary");
assertThat("test file should exist", Files.exists(translogFile), equalTo(true));
openReader(translogFile, 0);
fail("should have thrown an exception about the header being corrupt");
} catch (TranslogCorruptedException e) {
assertThat("translog corruption from header: " + e.getMessage(),
e.getMessage().contains("Invalid first byte in translog file, got: 1, expected 0x00 or 0x3f"), equalTo(true));
}
checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-corrupted-body.binary", "pre-2.0 translog");
}
public void testTruncatedTranslog() throws Exception {
checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-truncated.binary", "pre-2.0 translog");
}
public TranslogReader openReader(final Path path, final long id) throws IOException {
try (FileChannel channel = FileChannel.open(path, StandardOpenOption.READ)) {
final long minSeqNo = SequenceNumbers.NO_OPS_PERFORMED;
final long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED;
final Checkpoint checkpoint =
new Checkpoint(Files.size(path), 1, id, minSeqNo, maxSeqNo, SequenceNumbers.UNASSIGNED_SEQ_NO, id);
return TranslogReader.open(channel, path, checkpoint, null);
}
}
}


@ -241,7 +241,7 @@ public class FlushIT extends ESIntegTestCase {
private void indexDoc(Engine engine, String id) throws IOException {
final ParsedDocument doc = InternalEngineTests.createParsedDoc(id, null);
final Engine.IndexResult indexResult = engine.index(new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), doc));
final Engine.IndexResult indexResult = engine.index(new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), 1L, doc));
assertThat(indexResult.getFailure(), nullValue());
}
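The added 1L is the primary term, which Engine.Index now takes between the uid term and the parsed document. A sketch of the updated call shape:

    ParsedDocument doc = InternalEngineTests.createParsedDoc(id, null);
    long primaryTerm = 1L; // the term under which the operation is performed
    Engine.IndexResult result = engine.index(
            new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), primaryTerm, doc));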


@ -268,7 +268,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
final BytesReference source = new BytesArray(new byte[] { 1 });
final ParsedDocument doc =
new ParsedDocument(versionField, seqID, id, type, null, Arrays.asList(document), source, XContentType.JSON, null);
return new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), doc);
return new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), randomNonNegativeLong(), doc);
}
public void testHandleCorruptedIndexOnSendSendFiles() throws Throwable {


@ -200,7 +200,8 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase {
final String historyUUIDtoUse = UUIDs.randomBase64UUID(random());
if (randomBoolean()) {
// create a new translog
translogUUIDtoUse = Translog.createEmptyTranslog(replica.shardPath().resolveTranslog(), flushedDocs, replica.shardId());
translogUUIDtoUse = Translog.createEmptyTranslog(replica.shardPath().resolveTranslog(), flushedDocs,
replica.shardId(), replica.getPrimaryTerm());
translogGenToUse = 1;
} else {
translogUUIDtoUse = translogGeneration.translogUUID;


@ -99,7 +99,8 @@ public class BlobStoreRepositoryRestoreTests extends IndexShardTestCase {
// build a new shard using the same store directory as the closed shard
ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(shard.routingEntry(), EXISTING_STORE_INSTANCE);
shard = newShard(shardRouting, shard.shardPath(), shard.indexSettings().getIndexMetaData(), null,
new InternalEngineFactory(), () -> {});
new InternalEngineFactory(), () -> {},
EMPTY_EVENT_LISTENER);
// restore the shard
recoverShardFromSnapshot(shard, snapshot, repository);


@ -324,6 +324,7 @@ public class SearchModuleTests extends ModuleTestCase {
"simple_query_string",
"span_containing",
"span_first",
"span_gap",
"span_multi",
"span_near",
"span_not",


@ -21,15 +21,12 @@ package org.elasticsearch.search;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.script.MockScriptPlugin;
import org.elasticsearch.script.Script;
import org.elasticsearch.script.ScriptType;
import org.elasticsearch.search.aggregations.AggregatorFactories;
import org.elasticsearch.search.query.QueryPhaseExecutionException;
import org.elasticsearch.test.ESIntegTestCase;
import java.util.Collection;
@ -57,7 +54,10 @@ public class SearchTimeoutIT extends ESIntegTestCase {
}
public void testSimpleTimeout() throws Exception {
client().prepareIndex("test", "type", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
for (int i = 0; i < 32; i++) {
client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value").get();
}
refresh("test");
SearchResponse searchResponse = client().prepareSearch("test").setTimeout(new TimeValue(10, TimeUnit.MILLISECONDS))
.setQuery(scriptQuery(
@ -66,19 +66,19 @@ public class SearchTimeoutIT extends ESIntegTestCase {
.execute().actionGet();
assertThat(searchResponse.isTimedOut(), equalTo(true));
}
public void testPartialResultsIntolerantTimeout() throws Exception {
client().prepareIndex("test", "type", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
ElasticsearchException ex = expectThrows(ElasticsearchException.class, () ->
ElasticsearchException ex = expectThrows(ElasticsearchException.class, () ->
client().prepareSearch("test").setTimeout(new TimeValue(10, TimeUnit.MILLISECONDS))
.setQuery(scriptQuery(
new Script(ScriptType.INLINE, "mockscript", SCRIPT_NAME, Collections.emptyMap())))
.setAllowPartialSearchResults(false) // this line causes timeouts to report failures
.execute().actionGet()
.execute().actionGet()
);
assertTrue(ex.toString().contains("Time exceeded"));
}
}
public static class ScriptedTimeoutPlugin extends MockScriptPlugin {
static final String SCRIPT_NAME = "search_timeout";


@ -68,8 +68,10 @@ public class InternalGeoCentroidTests extends InternalAggregationTestCase<Intern
}
totalCount += input.count();
}
assertEquals(latSum/totalCount, reduced.centroid().getLat(), 1E-5D);
assertEquals(lonSum/totalCount, reduced.centroid().getLon(), 1E-5D);
if (totalCount > 0) {
assertEquals(latSum/totalCount, reduced.centroid().getLat(), 1E-5D);
assertEquals(lonSum/totalCount, reduced.centroid().getLon(), 1E-5D);
}
assertEquals(totalCount, reduced.count());
}


@ -0,0 +1,94 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.transport;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.transport.MockTransportService;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.Collections;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.transport.RemoteClusterConnectionTests.startTransport;
public class RemoteClusterClientTests extends ESTestCase {
private final ThreadPool threadPool = new TestThreadPool(getClass().getName());
@Override
public void tearDown() throws Exception {
super.tearDown();
ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
}
public void testConnectAndExecuteRequest() throws Exception {
Settings remoteSettings = Settings.builder().put(ClusterName.CLUSTER_NAME_SETTING.getKey(), "foo_bar_cluster").build();
try (MockTransportService remoteTransport = startTransport("remote_node", Collections.emptyList(), Version.CURRENT, threadPool,
remoteSettings)) {
DiscoveryNode remoteNode = remoteTransport.getLocalDiscoNode();
Settings localSettings = Settings.builder()
.put(RemoteClusterService.ENABLE_REMOTE_CLUSTERS.getKey(), true)
.put("search.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()).build();
try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) {
service.start();
service.acceptIncomingRequests();
RemoteClusterService remoteClusterService = service.getRemoteClusterService();
assertTrue(remoteClusterService.isRemoteNodeConnected("test", remoteNode));
Client client = remoteClusterService.getRemoteClusterClient(threadPool, "test");
ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().execute().get();
assertNotNull(clusterStateResponse);
assertEquals("foo_bar_cluster", clusterStateResponse.getState().getClusterName().value());
// also test a failure, there is no handler for search registered
ActionNotFoundTransportException ex = expectThrows(ActionNotFoundTransportException.class,
() -> client.prepareSearch().get());
assertEquals("No handler for action [indices:data/read/search]", ex.getMessage());
}
}
}
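The two calls exercised here are the whole surface of the new remote-cluster client: fetch the RemoteClusterService from the transport service, ask it for a Client bound to a configured cluster alias, and use that client like any other. A minimal sketch (the "test" alias is seeded via search.remote.test.seeds as above):

    RemoteClusterService remoteClusterService = service.getRemoteClusterService();
    Client client = remoteClusterService.getRemoteClusterClient(threadPool, "test");
    // executes against the remote cluster, not the local one
    ClusterStateResponse response = client.admin().cluster().prepareState().execute().get();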
public void testEnsureWeReconnect() throws Exception {
Settings remoteSettings = Settings.builder().put(ClusterName.CLUSTER_NAME_SETTING.getKey(), "foo_bar_cluster").build();
try (MockTransportService remoteTransport = startTransport("remote_node", Collections.emptyList(), Version.CURRENT, threadPool,
remoteSettings)) {
DiscoveryNode remoteNode = remoteTransport.getLocalDiscoNode();
Settings localSettings = Settings.builder()
.put(RemoteClusterService.ENABLE_REMOTE_CLUSTERS.getKey(), true)
.put("search.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()).build();
try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) {
service.start();
service.acceptIncomingRequests();
service.disconnectFromNode(remoteNode);
RemoteClusterService remoteClusterService = service.getRemoteClusterService();
assertBusy(() -> assertFalse(remoteClusterService.isRemoteNodeConnected("test", remoteNode)));
Client client = remoteClusterService.getRemoteClusterClient(threadPool, "test");
ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().execute().get();
assertNotNull(clusterStateResponse);
assertEquals("foo_bar_cluster", clusterStateResponse.getState().getClusterName().value());
}
}
}
}


@ -89,6 +89,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.BiFunction;
import java.util.function.LongSupplier;
import java.util.function.ToLongBiFunction;
@ -118,6 +119,8 @@ public abstract class EngineTestCase extends ESTestCase {
protected String codecName;
protected Path primaryTranslogDir;
protected Path replicaTranslogDir;
// A default primary term is used by engine instances created in this test.
protected AtomicLong primaryTerm = new AtomicLong();
protected static void assertVisibleCount(Engine engine, int numDocs) throws IOException {
assertVisibleCount(engine, numDocs, true);
@ -138,7 +141,7 @@ public abstract class EngineTestCase extends ESTestCase {
@Before
public void setUp() throws Exception {
super.setUp();
primaryTerm.set(randomLongBetween(1, Long.MAX_VALUE));
CodecService codecService = new CodecService(null, logger);
String name = Codec.getDefault().getName();
if (Arrays.asList(codecService.availableCodecs()).contains(name)) {
@ -186,7 +189,7 @@ public abstract class EngineTestCase extends ESTestCase {
new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(),
config.getTranslogConfig(), config.getFlushMergesAfter(),
config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(), config.getTranslogRecoveryRunner(),
config.getCircuitBreakerService(), globalCheckpointSupplier);
config.getCircuitBreakerService(), globalCheckpointSupplier, config.getPrimaryTermSupplier());
}
public EngineConfig copy(EngineConfig config, Analyzer analyzer) {
@ -195,7 +198,7 @@ public abstract class EngineTestCase extends ESTestCase {
new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(),
config.getTranslogConfig(), config.getFlushMergesAfter(),
config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(), config.getTranslogRecoveryRunner(),
config.getCircuitBreakerService(), config.getGlobalCheckpointSupplier());
config.getCircuitBreakerService(), config.getGlobalCheckpointSupplier(), config.getPrimaryTermSupplier());
}
@Override
@ -268,15 +271,16 @@ public abstract class EngineTestCase extends ESTestCase {
return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId));
}
protected Translog createTranslog() throws IOException {
return createTranslog(primaryTranslogDir);
protected Translog createTranslog(LongSupplier primaryTermSupplier) throws IOException {
return createTranslog(primaryTranslogDir, primaryTermSupplier);
}
protected Translog createTranslog(Path translogPath) throws IOException {
protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSupplier) throws IOException {
TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE);
String translogUUID = Translog.createEmptyTranslog(translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId);
return new Translog(
translogConfig, translogUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED);
String translogUUID = Translog.createEmptyTranslog(translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId,
primaryTermSupplier.getAsLong());
return new Translog(translogConfig, translogUUID, createTranslogDeletionPolicy(INDEX_SETTINGS),
() -> SequenceNumbers.NO_OPS_PERFORMED, primaryTermSupplier);
}
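Callers of the old no-arg helper now thread the fixture's term holder through; typical usage in a subclass becomes:

    Translog translog = createTranslog(primaryTerm::get);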
protected InternalEngine createEngine(Store store, Path translogPath) throws IOException {
@ -374,8 +378,8 @@ public abstract class EngineTestCase extends ESTestCase {
final Directory directory = store.directory();
if (Lucene.indexExists(directory) == false) {
store.createEmpty();
final String translogUuid =
Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
final String translogUuid = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(),
SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
store.associateIndexWithNewTranslog(translogUuid);
}
@ -457,7 +461,7 @@ public abstract class EngineTestCase extends ESTestCase {
new NoneCircuitBreakerService(),
globalCheckpointSupplier == null ?
new ReplicationTracker(shardId, allocationId.getId(), indexSettings, SequenceNumbers.NO_OPS_PERFORMED) :
globalCheckpointSupplier);
globalCheckpointSupplier, primaryTerm::get);
return config;
}
@ -483,12 +487,12 @@ public abstract class EngineTestCase extends ESTestCase {
}
protected Engine.Index indexForDoc(ParsedDocument doc) {
return new Engine.Index(newUid(doc), doc);
return new Engine.Index(newUid(doc), primaryTerm.get(), doc);
}
protected Engine.Index replicaIndexForDoc(ParsedDocument doc, long version, long seqNo,
boolean isRetry) {
return new Engine.Index(newUid(doc), doc, seqNo, 1, version, VersionType.EXTERNAL,
return new Engine.Index(newUid(doc), doc, seqNo, primaryTerm.get(), version, VersionType.EXTERNAL,
Engine.Operation.Origin.REPLICA, System.nanoTime(),
IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, isRetry);
}


@ -106,6 +106,8 @@ import static org.hamcrest.Matchers.hasSize;
*/
public abstract class IndexShardTestCase extends ESTestCase {
public static final IndexEventListener EMPTY_EVENT_LISTENER = new IndexEventListener() {};
protected static final PeerRecoveryTargetService.RecoveryListener recoveryListener = new PeerRecoveryTargetService.RecoveryListener() {
@Override
public void onRecoveryDone(RecoveryState state) {
@ -261,24 +263,25 @@ public abstract class IndexShardTestCase extends ESTestCase {
final ShardId shardId = routing.shardId();
final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir());
ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
return newShard(routing, shardPath, indexMetaData, indexSearcherWrapper, engineFactory, globalCheckpointSyncer, listeners);
return newShard(routing, shardPath, indexMetaData, indexSearcherWrapper, engineFactory, globalCheckpointSyncer,
EMPTY_EVENT_LISTENER, listeners);
}
/**
* creates a new initializing shard.
*
* @param routing shard routing to use
* @param routing shard routing to use
* @param shardPath path to use for shard data
* @param indexMetaData indexMetaData for the shard, including any mapping
* @param indexSearcherWrapper an optional wrapper to be used during searchers
* @param globalCheckpointSyncer callback for syncing global checkpoints
* @param indexEventListener index event listener for the shard
* @param listeners an optional set of listeners to add to the shard
*/
protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMetaData indexMetaData,
@Nullable IndexSearcherWrapper indexSearcherWrapper,
@Nullable EngineFactory engineFactory,
Runnable globalCheckpointSyncer,
IndexingOperationListener... listeners) throws IOException {
IndexEventListener indexEventListener, IndexingOperationListener... listeners) throws IOException {
final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build();
final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings);
final IndexShard indexShard;
@ -290,8 +293,6 @@ public abstract class IndexShardTestCase extends ESTestCase {
indexSettings.getSettings(), "index");
mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY);
SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap());
final IndexEventListener indexEventListener = new IndexEventListener() {
};
final Engine.Warmer warmer = searcher -> {
};
ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
@ -336,7 +337,7 @@ public abstract class IndexShardTestCase extends ESTestCase {
null,
current.engineFactory,
current.getGlobalCheckpointSyncer(),
listeners);
EMPTY_EVENT_LISTENER, listeners);
}
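Call sites with no interest in lifecycle events pass the shared no-op EMPTY_EVENT_LISTENER; a sketch of the updated call shape (arguments as in BlobStoreRepositoryRestoreTests above):

    IndexShard shard = newShard(shardRouting, shardPath, indexMetaData, null,
            new InternalEngineFactory(), () -> {}, EMPTY_EVENT_LISTENER);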
/**


@ -135,6 +135,7 @@ import java.util.Map;
import java.util.Objects;
import java.util.Random;
import java.util.Set;
import java.util.TimeZone;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
@ -173,6 +174,9 @@ import static org.hamcrest.Matchers.hasItem;
@LuceneTestCase.SuppressReproduceLine
public abstract class ESTestCase extends LuceneTestCase {
private static final List<String> JODA_TIMEZONE_IDS;
private static final List<String> JAVA_TIMEZONE_IDS;
private static final AtomicInteger portGenerator = new AtomicInteger();
@AfterClass
@ -191,6 +195,14 @@ public abstract class ESTestCase extends LuceneTestCase {
}));
BootstrapForTesting.ensureInitialized();
List<String> jodaTZIds = new ArrayList<>(DateTimeZone.getAvailableIDs());
Collections.sort(jodaTZIds);
JODA_TIMEZONE_IDS = Collections.unmodifiableList(jodaTZIds);
List<String> javaTZIds = Arrays.asList(TimeZone.getAvailableIDs());
Collections.sort(javaTZIds);
JAVA_TIMEZONE_IDS = Collections.unmodifiableList(javaTZIds);
}
protected final Logger logger = Loggers.getLogger(getClass());
@ -669,9 +681,14 @@ public abstract class ESTestCase extends LuceneTestCase {
* generate a random DateTimeZone from the ones available in joda library
*/
public static DateTimeZone randomDateTimeZone() {
List<String> ids = new ArrayList<>(DateTimeZone.getAvailableIDs());
Collections.sort(ids);
return DateTimeZone.forID(randomFrom(ids));
return DateTimeZone.forID(randomFrom(JODA_TIMEZONE_IDS));
}
/**
* generate a random TimeZone from the ones available in java.time
*/
public static TimeZone randomTimeZone() {
return TimeZone.getTimeZone(randomFrom(JAVA_TIMEZONE_IDS));
}
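Both helpers now draw from ID lists that are sorted and cached once per JVM, so repeated calls stay cheap and reproducible under a fixed seed. Usage mirrors the existing joda variant:

    TimeZone javaTz = randomTimeZone();         // drawn from JAVA_TIMEZONE_IDS
    DateTimeZone jodaTz = randomDateTimeZone(); // drawn from JODA_TIMEZONE_IDS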
/**