From fac009630d77e072c589889743e2a56dc70d16a4 Mon Sep 17 00:00:00 2001
From: Martijn van Groningen
Date: Thu, 12 Apr 2018 11:41:41 +0200
Subject: [PATCH 01/14] test: Index more docs, so that it is more likely that
 the search request times out.

Closes #29221
---
 .../elasticsearch/search/SearchTimeoutIT.java | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/server/src/test/java/org/elasticsearch/search/SearchTimeoutIT.java b/server/src/test/java/org/elasticsearch/search/SearchTimeoutIT.java
index 15bed6979fc..762a22c823f 100644
--- a/server/src/test/java/org/elasticsearch/search/SearchTimeoutIT.java
+++ b/server/src/test/java/org/elasticsearch/search/SearchTimeoutIT.java
@@ -21,15 +21,12 @@ package org.elasticsearch.search;
 
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.search.SearchResponse;
-import org.elasticsearch.action.search.SearchType;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.script.MockScriptPlugin;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptType;
-import org.elasticsearch.search.aggregations.AggregatorFactories;
-import org.elasticsearch.search.query.QueryPhaseExecutionException;
 import org.elasticsearch.test.ESIntegTestCase;
 
 import java.util.Collection;
@@ -57,7 +54,10 @@ public class SearchTimeoutIT extends ESIntegTestCase {
     }
 
     public void testSimpleTimeout() throws Exception {
-        client().prepareIndex("test", "type", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
+        for (int i = 0; i < 32; i++) {
+            client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value").get();
+        }
+        refresh("test");
 
         SearchResponse searchResponse = client().prepareSearch("test").setTimeout(new TimeValue(10, TimeUnit.MILLISECONDS))
             .setQuery(scriptQuery(
@@ -66,19 +66,19 @@ public class SearchTimeoutIT extends ESIntegTestCase {
             .execute().actionGet();
         assertThat(searchResponse.isTimedOut(), equalTo(true));
     }
-
+
     public void testPartialResultsIntolerantTimeout() throws Exception {
         client().prepareIndex("test", "type", "1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get();
-
-        ElasticsearchException ex = expectThrows(ElasticsearchException.class, () ->
+
+        ElasticsearchException ex = expectThrows(ElasticsearchException.class, () ->
             client().prepareSearch("test").setTimeout(new TimeValue(10, TimeUnit.MILLISECONDS))
                 .setQuery(scriptQuery(
                     new Script(ScriptType.INLINE, "mockscript", SCRIPT_NAME, Collections.emptyMap())))
                 .setAllowPartialSearchResults(false) // this line causes timeouts to report failures
-                .execute().actionGet()
+                .execute().actionGet()
         );
         assertTrue(ex.toString().contains("Time exceeded"));
-    }
+    }
 
     public static class ScriptedTimeoutPlugin extends MockScriptPlugin {
         static final String SCRIPT_NAME = "search_timeout";

From e0ec8571ea42fad5cd3a23ef0a867e8762507718 Mon Sep 17 00:00:00 2001
From: Jay Modi
Date: Thu, 12 Apr 2018 07:28:34 -0600
Subject: [PATCH 02/14] Build: introduce keystoreFile for cluster config
 (#29491)

This commit introduces built-in support for adding files to the keystore
when configuring the integration test cluster for a project. In order to
use this support, simply add `keystoreFile` followed by the secure setting
name and the path to the source file inside the integTestCluster closure
for a project.
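For example (adapted from the repository-gcs build script change included
in this patch; the secure setting name and source file shown are specific
to that plugin):

    integTestCluster {
        keystoreFile 'gcs.client.integration_test.credentials_file', "${serviceAccountFile.absolutePath}"
    }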
The built-in support will handle the creation of the keystore and the
addition of the file to the keystore.
---
 .../gradle/test/ClusterConfiguration.groovy  | 11 ++++++
 .../gradle/test/ClusterFormationTasks.groovy | 34 ++++++++++++++++++-
 plugins/repository-gcs/build.gradle          |  4 +--
 3 files changed, 45 insertions(+), 4 deletions(-)

diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
index 884f008b8ba..1e609c5e05f 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterConfiguration.groovy
@@ -141,6 +141,8 @@ class ClusterConfiguration {
 
     Map<String, String> keystoreSettings = new HashMap<>()
 
+    Map<String, Object> keystoreFiles = new HashMap<>()
+
     // map from destination path, to source file
     Map<String, Object> extraConfigFiles = new HashMap<>()
 
@@ -167,6 +169,15 @@ class ClusterConfiguration {
         keystoreSettings.put(name, value)
     }
 
+    /**
+     * Adds a file to the keystore. The name is the secure setting name, and the sourceFile
+     * is anything accepted by project.file()
+     */
+    @Input
+    void keystoreFile(String name, Object sourceFile) {
+        keystoreFiles.put(name, sourceFile)
+    }
+
     @Input
     void plugin(String path) {
         Project pluginProject = project.project(path)
diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
index 8e97ee352ea..f0b232c0d47 100644
--- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
+++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/test/ClusterFormationTasks.groovy
@@ -180,6 +180,7 @@ class ClusterFormationTasks {
         setup = configureWriteConfigTask(taskName(prefix, node, 'configure'), project, setup, node, seedNode)
         setup = configureCreateKeystoreTask(taskName(prefix, node, 'createKeystore'), project, setup, node)
         setup = configureAddKeystoreSettingTasks(prefix, project, setup, node)
+        setup = configureAddKeystoreFileTasks(prefix, project, setup, node)
 
         if (node.config.plugins.isEmpty() == false) {
             if (node.nodeVersion == VersionProperties.elasticsearch) {
@@ -323,7 +324,7 @@ class ClusterFormationTasks {
 
     /** Adds a task to create keystore */
     static Task configureCreateKeystoreTask(String name, Project project, Task setup, NodeInfo node) {
-        if (node.config.keystoreSettings.isEmpty()) {
+        if (node.config.keystoreSettings.isEmpty() && node.config.keystoreFiles.isEmpty()) {
             return setup
         } else {
             /*
@@ -357,6 +358,37 @@ class ClusterFormationTasks {
         return parentTask
     }
 
+    /** Adds tasks to add files to the keystore */
+    static Task configureAddKeystoreFileTasks(String parent, Project project, Task setup, NodeInfo node) {
+        Map<String, Object> kvs = node.config.keystoreFiles
+        if (kvs.isEmpty()) {
+            return setup
+        }
+        Task parentTask = setup
+        /*
+         * We have to delay building the string as the path will not exist during configuration, which would fail
+         * on Windows because getting the short name requires the path to already exist.
+         */
+        final Object esKeystoreUtil = "${-> node.binPath().resolve('elasticsearch-keystore').toString()}"
+        for (Map.Entry<String, Object> entry in kvs) {
+            String key = entry.getKey()
+            String name = taskName(parent, node, 'addToKeystore#' + key)
+            String srcFileName = entry.getValue()
+            Task t = configureExecTask(name, project, parentTask, node, esKeystoreUtil, 'add-file', key, srcFileName)
+            t.doFirst {
+                File srcFile = project.file(srcFileName)
+                if (srcFile.isDirectory()) {
+                    throw new GradleException("Source for keystoreFile must be a file: ${srcFile}")
+                }
+                if (srcFile.exists() == false) {
+                    throw new GradleException("Source file for keystoreFile does not exist: ${srcFile}")
+                }
+            }
+            parentTask = t
+        }
+        return parentTask
+    }
+
     static Task configureExtraConfigFilesTask(String name, Project project, Task setup, NodeInfo node) {
         if (node.config.extraConfigFiles.isEmpty()) {
             return setup
diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle
index bf2768a4312..f627b7aee10 100644
--- a/plugins/repository-gcs/build.gradle
+++ b/plugins/repository-gcs/build.gradle
@@ -100,9 +100,7 @@ task createServiceAccountFile() {
 
 integTestCluster {
     dependsOn createServiceAccountFile, googleCloudStorageFixture
-    setupCommand 'create-elasticsearch-keystore', 'bin/elasticsearch-keystore', 'create'
-    setupCommand 'add-credentials-to-elasticsearch-keystore',
-            'bin/elasticsearch-keystore', 'add-file', 'gcs.client.integration_test.credentials_file', "${serviceAccountFile.absolutePath}"
+    keystoreFile 'gcs.client.integration_test.credentials_file', "${serviceAccountFile.absolutePath}"
 
     /* Use a closure on the string to delay evaluation until tests are executed */
     setting 'gcs.client.integration_test.endpoint', "http://${ -> googleCloudStorageFixture.addressAndPort }"

From 0aa19186ae3302e60c521d90d71563c64d68d024 Mon Sep 17 00:00:00 2001
From: Igor Motov
Date: Thu, 12 Apr 2018 10:13:40 -0400
Subject: [PATCH 03/14] Fix NPE in InternalGeoCentroidTests#testReduceRandom
 (#29481)

In some rare cases all inputs might have a zero count, resulting in a zero
totalCount and a null centroid, which causes an NPE.

Closes #29480
---
 .../metrics/geocentroid/InternalGeoCentroidTests.java | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroidTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroidTests.java
index c797fcb91db..9dc7896638c 100644
--- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroidTests.java
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/geocentroid/InternalGeoCentroidTests.java
@@ -68,8 +68,10 @@ public class InternalGeoCentroidTests extends InternalAggregationTestCase<InternalGeoCentroid>
-        assertEquals(latSum/totalCount, reduced.centroid().getLat(), 1E-5D);
-        assertEquals(lonSum/totalCount, reduced.centroid().getLon(), 1E-5D);
+        if (totalCount > 0) {
+            assertEquals(latSum/totalCount, reduced.centroid().getLat(), 1E-5D);
+            assertEquals(lonSum/totalCount, reduced.centroid().getLon(), 1E-5D);
+        }
         assertEquals(totalCount, reduced.count());
     }
 

From 14097359a469bc8d9313dd53dc3717d1838609ca Mon Sep 17 00:00:00 2001
From: Lee Hinman
Date: Thu, 12 Apr 2018 10:24:58 -0600
Subject: [PATCH 04/14] Move TimeValue into elasticsearch-core project (#29486)

This commit moves the `TimeValue` class into the elasticsearch-core project.
This allows us to use this class in many of our other projects without
relying on the entire `server` jar.
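As a rough sketch of what this enables, a consumer that depends only on the
core lib can parse and compare time values without any server classes (the
setting name below is illustrative; parseTimeValue and timeValueSeconds are
exercised by the existing TimeValueTests):

    TimeValue timeout = TimeValue.parseTimeValue("30s", null, "example_setting");
    assert timeout.compareTo(TimeValue.timeValueSeconds(29)) > 0;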
Relates to #28504
---
 .../elasticsearch/common/unit/TimeValue.java | 10 +------
 .../common/unit/TimeValueTests.java          | 30 -------------------
 .../XContentElasticsearchExtension.java      |  1 +
 .../common/io/stream/BytesStreamsTests.java  | 28 +++++++++++++++++
 4 files changed, 30 insertions(+), 39 deletions(-)
 rename {server => libs/elasticsearch-core}/src/main/java/org/elasticsearch/common/unit/TimeValue.java (96%)
 rename {server => libs/elasticsearch-core}/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java (88%)

diff --git a/server/src/main/java/org/elasticsearch/common/unit/TimeValue.java b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/unit/TimeValue.java
similarity index 96%
rename from server/src/main/java/org/elasticsearch/common/unit/TimeValue.java
rename to libs/elasticsearch-core/src/main/java/org/elasticsearch/common/unit/TimeValue.java
index 8fa2c94173a..56cdc09b34e 100644
--- a/server/src/main/java/org/elasticsearch/common/unit/TimeValue.java
+++ b/libs/elasticsearch-core/src/main/java/org/elasticsearch/common/unit/TimeValue.java
@@ -19,15 +19,12 @@
 
 package org.elasticsearch.common.unit;
 
-import org.elasticsearch.common.xcontent.ToXContentFragment;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-
 import java.io.IOException;
 import java.util.Locale;
 import java.util.Objects;
 import java.util.concurrent.TimeUnit;
 
-public class TimeValue implements Comparable<TimeValue>, ToXContentFragment {
+public class TimeValue implements Comparable<TimeValue> {
 
     /** How many nano-seconds in one milli-second */
     public static final long NSEC_PER_MSEC = TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS);
@@ -352,9 +349,4 @@ public class TimeValue implements Comparable<TimeValue>, ToXContentFragment {
         double otherValue = ((double) timeValue.duration) * timeValue.timeUnit.toNanos(1);
         return Double.compare(thisValue, otherValue);
     }
-
-    @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        return builder.value(toString());
-    }
 }
diff --git a/server/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java b/libs/elasticsearch-core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java
similarity index 88%
rename from server/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java
rename to libs/elasticsearch-core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java
index a94e6581ea3..af6b89be5ff 100644
--- a/server/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java
+++ b/libs/elasticsearch-core/src/test/java/org/elasticsearch/common/unit/TimeValueTests.java
@@ -19,15 +19,10 @@
 
 package org.elasticsearch.common.unit;
 
-import org.elasticsearch.common.io.stream.BytesStreamOutput;
-import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.test.ESTestCase;
 
-import java.io.IOException;
 import java.util.concurrent.TimeUnit;
 
-import static org.elasticsearch.common.unit.TimeValue.timeValueNanos;
-import static org.elasticsearch.common.unit.TimeValue.timeValueSeconds;
 import static org.hamcrest.CoreMatchers.instanceOf;
 import static org.hamcrest.CoreMatchers.not;
 import static org.hamcrest.Matchers.containsString;
@@ -154,31 +149,6 @@ public class TimeValueTests extends ESTestCase {
         return randomFrom("nanos", "micros", "ms", "s", "m", "h", "d");
     }
 
-    private void assertEqualityAfterSerialize(TimeValue value, int expectedSize) throws IOException {
-        BytesStreamOutput out = new BytesStreamOutput();
-        out.writeTimeValue(value);
-        assertEquals(expectedSize, out.size());
-
-        StreamInput in
= out.bytes().streamInput(); - TimeValue inValue = in.readTimeValue(); - - assertThat(inValue, equalTo(value)); - assertThat(inValue.duration(), equalTo(value.duration())); - assertThat(inValue.timeUnit(), equalTo(value.timeUnit())); - } - - public void testSerialize() throws Exception { - assertEqualityAfterSerialize(new TimeValue(100, TimeUnit.DAYS), 3); - assertEqualityAfterSerialize(timeValueNanos(-1), 2); - assertEqualityAfterSerialize(timeValueNanos(1), 2); - assertEqualityAfterSerialize(timeValueSeconds(30), 2); - - final TimeValue timeValue = new TimeValue(randomIntBetween(0, 1024), randomFrom(TimeUnit.values())); - BytesStreamOutput out = new BytesStreamOutput(); - out.writeZLong(timeValue.duration()); - assertEqualityAfterSerialize(timeValue, 1 + out.bytes().length()); - } - public void testFailOnUnknownUnits() { try { TimeValue.parseTimeValue("23tw", null, "test"); diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java index 42089d23923..38abe90ad46 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentElasticsearchExtension.java @@ -61,6 +61,7 @@ public class XContentElasticsearchExtension implements XContentBuilderExtension writers.put(FixedDateTimeZone.class, (b, v) -> b.value(Objects.toString(v))); writers.put(MutableDateTime.class, XContentBuilder::timeValue); writers.put(DateTime.class, XContentBuilder::timeValue); + writers.put(TimeValue.class, (b, v) -> b.value(v.toString())); writers.put(BytesReference.class, (b, v) -> { if (v == null) { diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java index 27656e9bc09..cee6c2e4cb8 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.test.ESTestCase; import org.joda.time.DateTimeZone; @@ -41,6 +42,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; import java.util.TreeMap; +import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -812,4 +814,30 @@ public class BytesStreamsTests extends ESTestCase { } assertEquals(0, input.available()); } + + private void assertEqualityAfterSerialize(TimeValue value, int expectedSize) throws IOException { + BytesStreamOutput out = new BytesStreamOutput(); + out.writeTimeValue(value); + assertEquals(expectedSize, out.size()); + + StreamInput in = out.bytes().streamInput(); + TimeValue inValue = in.readTimeValue(); + + assertThat(inValue, equalTo(value)); + assertThat(inValue.duration(), equalTo(value.duration())); + assertThat(inValue.timeUnit(), equalTo(value.timeUnit())); + } + + public void testTimeValueSerialize() throws Exception { + assertEqualityAfterSerialize(new TimeValue(100, TimeUnit.DAYS), 3); + 
assertEqualityAfterSerialize(TimeValue.timeValueNanos(-1), 2);
+        assertEqualityAfterSerialize(TimeValue.timeValueNanos(1), 2);
+        assertEqualityAfterSerialize(TimeValue.timeValueSeconds(30), 2);
+
+        final TimeValue timeValue = new TimeValue(randomIntBetween(0, 1024), randomFrom(TimeUnit.values()));
+        BytesStreamOutput out = new BytesStreamOutput();
+        out.writeZLong(timeValue.duration());
+        assertEqualityAfterSerialize(timeValue, 1 + out.bytes().length());
+    }
+
 }

From d72d3f996e2f1375c2592858152b7715b8a9adb7 Mon Sep 17 00:00:00 2001
From: Lee Hinman
Date: Thu, 12 Apr 2018 11:56:42 -0600
Subject: [PATCH 05/14] Add a helper method to get a random java.util.TimeZone
 (#29487)

* Add a helper method to get a random java.util.TimeZone

This adds a helper method to ESTestCase that returns a randomized
`java.util.TimeZone`. This can be used when transitioning code from Joda to
the JDK's time classes.
---
 .../org/elasticsearch/test/ESTestCase.java | 23 ++++++++++++++++---
 1 file changed, 20 insertions(+), 3 deletions(-)

diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
index a65b8b430e6..32c660cd5d2 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
@@ -135,6 +135,7 @@ import java.util.Map;
 import java.util.Objects;
 import java.util.Random;
 import java.util.Set;
+import java.util.TimeZone;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
@@ -173,6 +174,9 @@ import static org.hamcrest.Matchers.hasItem;
 @LuceneTestCase.SuppressReproduceLine
 public abstract class ESTestCase extends LuceneTestCase {
 
+    private static final List<String> JODA_TIMEZONE_IDS;
+    private static final List<String> JAVA_TIMEZONE_IDS;
+
     private static final AtomicInteger portGenerator = new AtomicInteger();
 
     @AfterClass
@@ -191,6 +195,14 @@ public abstract class ESTestCase extends LuceneTestCase {
         }));
 
         BootstrapForTesting.ensureInitialized();
+
+        List<String> jodaTZIds = new ArrayList<>(DateTimeZone.getAvailableIDs());
+        Collections.sort(jodaTZIds);
+        JODA_TIMEZONE_IDS = Collections.unmodifiableList(jodaTZIds);
+
+        List<String> javaTZIds = Arrays.asList(TimeZone.getAvailableIDs());
+        Collections.sort(javaTZIds);
+        JAVA_TIMEZONE_IDS = Collections.unmodifiableList(javaTZIds);
     }
 
     protected final Logger logger = Loggers.getLogger(getClass());
@@ -669,9 +681,14 @@ public abstract class ESTestCase extends LuceneTestCase {
      * generate a random DateTimeZone from the ones available in joda library
      */
     public static DateTimeZone randomDateTimeZone() {
-        List<String> ids = new ArrayList<>(DateTimeZone.getAvailableIDs());
-        Collections.sort(ids);
-        return DateTimeZone.forID(randomFrom(ids));
+        return DateTimeZone.forID(randomFrom(JODA_TIMEZONE_IDS));
+    }
+
+    /**
+     * generate a random TimeZone from the ones available in java.time
+     */
+    public static TimeZone randomTimeZone() {
+        return TimeZone.getTimeZone(randomFrom(JAVA_TIMEZONE_IDS));
    }
 
     /**

From f96e00badf1411426190dd34c6dbfd341abdf024 Mon Sep 17 00:00:00 2001
From: Nhat Nguyen
Date: Thu, 12 Apr 2018 13:57:59 -0400
Subject: [PATCH 06/14] Add primary term to translog header (#29227)

This change adds the current primary term to the header of the current
translog file. Having a term in a translog header is a prerequisite step
that allows us to trim translog operations given the max valid seq# for
that term.
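For orientation, the header that the new TranslogHeader class (added below)
writes to disk is laid out as:

    codec header for "translog", version 3    (via CodecUtil.writeHeader)
    translog UUID length                      (int)
    translog UUID                             (bytes)
    primary term                              (long)
    header checksum                           (int)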
This commit also updates tests to conform to the primary term invariant,
which guarantees that all translog operations in a translog file have terms
at most the term stored in the translog header.
---
 .../elasticsearch/index/IndexSettings.java    |   4 +-
 .../elasticsearch/index/engine/Engine.java    |  15 +-
 .../index/engine/EngineConfig.java            |  11 +-
 .../index/engine/InternalEngine.java          |   2 +-
 .../elasticsearch/index/shard/IndexShard.java |   2 +-
 .../index/shard/StoreRecovery.java            |  10 +-
 .../index/translog/BaseTranslogReader.java    |  22 +-
 .../index/translog/Translog.java              |  62 +++--
 .../index/translog/TranslogHeader.java        | 195 +++++++++++++++
 .../index/translog/TranslogReader.java        |  98 +-------
 .../index/translog/TranslogSnapshot.java      |   8 +-
 .../index/translog/TranslogWriter.java        |  46 +---
 .../translog/TruncateTranslogCommand.java     |  11 +-
 .../indices/recovery/RecoveryTarget.java      |   4 +-
 .../resync/ResyncReplicationRequestTests.java |   3 +-
 .../elasticsearch/index/IndexModuleTests.java |   2 +-
 .../index/engine/InternalEngineTests.java     |  86 +++----
 .../RecoveryDuringReplicationTests.java       |   2 +-
 .../index/shard/IndexShardIT.java             |   2 +-
 .../index/shard/IndexShardTests.java          |  22 +-
 .../shard/IndexingOperationListenerTests.java |   4 +-
 .../index/shard/RefreshListenersTests.java    |   7 +-
 .../index/translog/TestTranslog.java          |  51 ++--
 .../translog/TranslogDeletionPolicyTests.java |   2 +-
 .../index/translog/TranslogHeaderTests.java   | 128 ++++++++++
 .../index/translog/TranslogTests.java         | 228 ++++++++++--------
 .../index/translog/TranslogVersionTests.java  |  96 --------
 .../elasticsearch/indices/flush/FlushIT.java  |   2 +-
 .../recovery/RecoverySourceHandlerTests.java  |   2 +-
 .../indices/recovery/RecoveryTests.java       |   3 +-
 .../index/engine/EngineTestCase.java          |  32 +--
 31 files changed, 680 insertions(+), 482 deletions(-)
 create mode 100644 server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java
 create mode 100644 server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java
 delete mode 100644 server/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java

diff --git a/server/src/main/java/org/elasticsearch/index/IndexSettings.java b/server/src/main/java/org/elasticsearch/index/IndexSettings.java
index e3c82b8abd3..db661743d9e 100644
--- a/server/src/main/java/org/elasticsearch/index/IndexSettings.java
+++ b/server/src/main/java/org/elasticsearch/index/IndexSettings.java
@@ -185,7 +185,7 @@ public final class IndexSettings {
     public static final Setting<ByteSizeValue> INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING =
             Setting.byteSizeSetting("index.translog.flush_threshold_size", new ByteSizeValue(512, ByteSizeUnit.MB),
                     /*
-                     * An empty translog occupies 43 bytes on disk. If the flush threshold is below this, the flush thread
+                     * An empty translog occupies 55 bytes on disk. If the flush threshold is below this, the flush thread
                      * can get stuck in an infinite loop as the shouldPeriodicallyFlush can still be true after flushing.
                      * However, small thresholds are useful for testing so we do not add a large lower bound here.
                      */
@@ -220,7 +220,7 @@ public final class IndexSettings {
             "index.translog.generation_threshold_size",
             new ByteSizeValue(64, ByteSizeUnit.MB),
             /*
-             * An empty translog occupies 43 bytes on disk. If the generation threshold is
+             * An empty translog occupies 55 bytes on disk. If the generation threshold is
              * below this, the flush thread can get stuck in an infinite loop repeatedly
             * rolling the generation as every new generation will already exceed the
             * generation threshold. However, small thresholds are useful for testing so we
             * do not add a large lower bound here.
             */

diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
index b6c71e06c93..fab8cba468b 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
@@ -1066,14 +1066,13 @@ public abstract class Engine implements Closeable {
             this.autoGeneratedIdTimestamp = autoGeneratedIdTimestamp;
         }
 
-        public Index(Term uid, ParsedDocument doc) {
-            this(uid, doc, Versions.MATCH_ANY);
+        public Index(Term uid, long primaryTerm, ParsedDocument doc) {
+            this(uid, primaryTerm, doc, Versions.MATCH_ANY);
         } // TEST ONLY
 
-        Index(Term uid, ParsedDocument doc, long version) {
-            // use a primary term of 2 to allow tests to reduce it to a valid >0 term
-            this(uid, doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 2, version, VersionType.INTERNAL,
-                Origin.PRIMARY, System.nanoTime(), -1, false);
+        Index(Term uid, long primaryTerm, ParsedDocument doc, long version) {
+            this(uid, doc, SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, version, VersionType.INTERNAL,
+                Origin.PRIMARY, System.nanoTime(), -1, false);
         } // TEST ONLY
 
         public ParsedDocument parsedDoc() {
@@ -1143,8 +1142,8 @@ public abstract class Engine implements Closeable {
             this.id = Objects.requireNonNull(id);
         }
 
-        public Delete(String type, String id, Term uid) {
-            this(type, id, uid, SequenceNumbers.UNASSIGNED_SEQ_NO, 0, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime());
+        public Delete(String type, String id, Term uid, long primaryTerm) {
+            this(type, id, uid, SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL, Origin.PRIMARY, System.nanoTime());
         }
 
         public Delete(Delete template, VersionType versionType) {
diff --git a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
index 352c3ba3e62..b7c5a416913 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/EngineConfig.java
@@ -79,6 +79,7 @@ public final class EngineConfig {
     @Nullable
     private final CircuitBreakerService circuitBreakerService;
     private final LongSupplier globalCheckpointSupplier;
+    private final LongSupplier primaryTermSupplier;
 
     /**
      * Index setting to change the low level lucene codec used for writing new segments.
@@ -125,7 +126,7 @@ public final class EngineConfig {
                         List<ReferenceManager.RefreshListener> externalRefreshListener,
                         List<ReferenceManager.RefreshListener> internalRefreshListener, Sort indexSort,
                         TranslogRecoveryRunner translogRecoveryRunner, CircuitBreakerService circuitBreakerService,
-                        LongSupplier globalCheckpointSupplier) {
+                        LongSupplier globalCheckpointSupplier, LongSupplier primaryTermSupplier) {
         this.shardId = shardId;
         this.allocationId = allocationId;
         this.indexSettings = indexSettings;
@@ -152,6 +153,7 @@ public final class EngineConfig {
         this.translogRecoveryRunner = translogRecoveryRunner;
         this.circuitBreakerService = circuitBreakerService;
         this.globalCheckpointSupplier = globalCheckpointSupplier;
+        this.primaryTermSupplier = primaryTermSupplier;
     }
 
     /**
@@ -354,4 +356,11 @@ public final class EngineConfig {
     public CircuitBreakerService getCircuitBreakerService() {
         return this.circuitBreakerService;
     }
+
+    /**
+     * Returns a supplier that supplies the latest primary term value of the associated shard.
+ */ + public LongSupplier getPrimaryTermSupplier() { + return primaryTermSupplier; + } } diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index 702f247b419..dcd1ba65d89 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -418,7 +418,7 @@ public class InternalEngine extends Engine { final TranslogConfig translogConfig = engineConfig.getTranslogConfig(); final String translogUUID = loadTranslogUUIDFromLastCommit(); // We expect that this shard already exists, so it must already have an existing translog else something is badly wrong! - return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier); + return new Translog(translogConfig, translogUUID, translogDeletionPolicy, globalCheckpointSupplier, engineConfig.getPrimaryTermSupplier()); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index cab7246a4cb..520115dc30a 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -2136,7 +2136,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), Collections.singletonList(refreshListeners), Collections.singletonList(new RefreshMetricUpdater(refreshMetric)), - indexSort, this::runTranslogRecovery, circuitBreakerService, replicationTracker); + indexSort, this::runTranslogRecovery, circuitBreakerService, replicationTracker, this::getPrimaryTerm); } /** diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index 3654aeba2bf..54718c545a4 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -393,7 +393,8 @@ final class StoreRecovery { store.bootstrapNewHistory(); final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); final long maxSeqNo = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.MAX_SEQ_NO)); - final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId); + final String translogUUID = Translog.createEmptyTranslog( + indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId, indexShard.getPrimaryTerm()); store.associateIndexWithNewTranslog(translogUUID); } else if (indexShouldExists) { // since we recover from local, just fill the files and size @@ -407,8 +408,8 @@ final class StoreRecovery { } } else { store.createEmpty(); - final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), - SequenceNumbers.NO_OPS_PERFORMED, shardId); + final String translogUUID = Translog.createEmptyTranslog( + indexShard.shardPath().resolveTranslog(), SequenceNumbers.NO_OPS_PERFORMED, shardId, indexShard.getPrimaryTerm()); store.associateIndexWithNewTranslog(translogUUID); } indexShard.openEngineAndRecoverFromTranslog(); @@ -456,7 +457,8 @@ final class StoreRecovery { store.bootstrapNewHistory(); final SegmentInfos segmentInfos = store.readLastCommittedSegmentsInfo(); final long maxSeqNo = 
Long.parseLong(segmentInfos.userData.get(SequenceNumbers.MAX_SEQ_NO));
-            final String translogUUID = Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId);
+            final String translogUUID = Translog.createEmptyTranslog(
+                indexShard.shardPath().resolveTranslog(), maxSeqNo, shardId, indexShard.getPrimaryTerm());
             store.associateIndexWithNewTranslog(translogUUID);
             assert indexShard.shardRouting.primary() : "only primary shards can recover from store";
             indexShard.openEngineAndRecoverFromTranslog();
diff --git a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java
index 53d14f32299..ff226ae00be 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/BaseTranslogReader.java
@@ -35,15 +35,15 @@ public abstract class BaseTranslogReader implements Comparable<BaseTranslogReader>
+        if (op.primaryTerm() > getPrimaryTerm() && getPrimaryTerm() != TranslogHeader.UNKNOWN_PRIMARY_TERM) {
+            throw new TranslogCorruptedException("Operation's term is newer than translog header term; " +
+                "operation term[" + op.primaryTerm() + "], translog header term [" + getPrimaryTerm() + "]");
+        }
+        return op;
+    }
 
     /**
diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java
index 9726654c386..ab4961892ca 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java
@@ -107,7 +107,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
     public static final String CHECKPOINT_FILE_NAME = "translog" + CHECKPOINT_SUFFIX;
 
     static final Pattern PARSE_STRICT_ID_PATTERN = Pattern.compile("^" + TRANSLOG_FILE_PREFIX + "(\\d+)(\\.tlog)$");
-    public static final int DEFAULT_HEADER_SIZE_IN_BYTES = TranslogWriter.getHeaderLength(UUIDs.randomBase64UUID());
+    public static final int DEFAULT_HEADER_SIZE_IN_BYTES = TranslogHeader.headerSizeInBytes(UUIDs.randomBase64UUID());
 
     // the list of translog readers is guaranteed to be in order of translog generation
     private final List<TranslogReader> readers = new ArrayList<>();
@@ -120,6 +120,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
     private final AtomicBoolean closed = new AtomicBoolean();
     private final TranslogConfig config;
     private final LongSupplier globalCheckpointSupplier;
+    private final LongSupplier primaryTermSupplier;
     private final String translogUUID;
     private final TranslogDeletionPolicy deletionPolicy;
 
@@ -131,17 +132,22 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
      * translog file referenced by this generation. The translog creation will fail if this generation can't be opened.
     *
      * @param config the configuration of this translog
-     * @param translogUUID the translog uuid to open, null for a new translog
+     * @param translogUUID the translog uuid to open, null for a new translog
      * @param deletionPolicy an instance of {@link TranslogDeletionPolicy} that controls when a translog file can be safely
      * deleted
     * @param globalCheckpointSupplier a supplier for the global checkpoint
+     * @param primaryTermSupplier a supplier for the latest value of primary term of the owning index shard. The latest term value is
+     *                            examined and stored in the header whenever a new generation is rolled. It's guaranteed from outside
+     *                            that a new generation is rolled when the term is increased. This guarantee allows us to validate
+     *                            and reject operations whose term is higher than the primary term stored in the translog header.
      */
     public Translog(
         final TranslogConfig config, final String translogUUID, TranslogDeletionPolicy deletionPolicy,
-        final LongSupplier globalCheckpointSupplier) throws IOException {
+        final LongSupplier globalCheckpointSupplier, final LongSupplier primaryTermSupplier) throws IOException {
         super(config.getShardId(), config.getIndexSettings());
         this.config = config;
         this.globalCheckpointSupplier = globalCheckpointSupplier;
+        this.primaryTermSupplier = primaryTermSupplier;
         this.deletionPolicy = deletionPolicy;
         this.translogUUID = translogUUID;
         bigArrays = config.getBigArrays();
@@ -163,7 +169,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
             //
             // For this to happen we must have already copied the translog.ckp file into translog-gen.ckp so we first check if that file exists
             // if not we don't even try to clean it up and wait until we fail creating it
-            assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogWriter.getHeaderLength(translogUUID) : "unexpected translog file: [" + nextTranslogFile + "]";
+            assert Files.exists(nextTranslogFile) == false || Files.size(nextTranslogFile) <= TranslogHeader.headerSizeInBytes(translogUUID) : "unexpected translog file: [" + nextTranslogFile + "]";
             if (Files.exists(currentCheckpointFile) // current checkpoint is already copied
                 && Files.deleteIfExists(nextTranslogFile)) { // delete it and log a warning
                 logger.warn("deleted previously created, but not yet committed, next generation [{}]. This can happen due to a tragic exception when creating a new generation", nextTranslogFile.getFileName());
@@ -224,6 +230,9 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
                         minGenerationToRecoverFrom + " checkpoint: " + checkpoint.generation + " - translog ids must be consecutive");
                 }
                 final TranslogReader reader = openReader(committedTranslogFile, Checkpoint.read(location.resolve(getCommitCheckpointFileName(i))));
+                assert reader.getPrimaryTerm() <= primaryTermSupplier.getAsLong() :
+                    "Primary terms go backwards; current term [" + primaryTermSupplier.getAsLong() + "]" +
+                    "translog path [ " + committedTranslogFile + ", existing term [" + reader.getPrimaryTerm() + "]";
                 foundTranslogs.add(reader);
                 logger.debug("recovered local translog from checkpoint {}", checkpoint);
             }
@@ -267,10 +276,6 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
     }
 
     TranslogReader openReader(Path path, Checkpoint checkpoint) throws IOException {
-        return openReader(path, checkpoint, translogUUID);
-    }
-
-    private static TranslogReader openReader(Path path, Checkpoint checkpoint, String translogUUID) throws IOException {
         FileChannel channel = FileChannel.open(path, StandardOpenOption.READ);
         try {
             assert Translog.parseIdFromFileName(path) == checkpoint.generation : "expected generation: " + Translog.parseIdFromFileName(path) + " but got: " + checkpoint.generation;
@@ -457,7 +462,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC
                     getChannelFactory(),
                     config.getBufferSize(),
                     initialMinTranslogGen, initialGlobalCheckpoint,
-                    globalCheckpointSupplier, this::getMinFileGeneration);
+                    globalCheckpointSupplier, this::getMinFileGeneration, primaryTermSupplier.getAsLong());
             } catch (final IOException e) {
                 throw
new TranslogException(shardId, "failed to create new translog file", e); } @@ -485,6 +490,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC final ReleasablePagedBytesReference bytes = out.bytes(); try (ReleasableLock ignored = readLock.acquire()) { ensureOpen(); + if (operation.primaryTerm() > current.getPrimaryTerm()) { + throw new IllegalArgumentException("Operation term is newer than the current term;" + + "current term[" + current.getPrimaryTerm() + "], operation term[" + operation + "]"); + } return current.add(bytes, operation.seqNo()); } } catch (final AlreadyClosedException | IOException ex) { @@ -991,17 +1000,17 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC this.autoGeneratedIdTimestamp = index.getAutoGeneratedIdTimestamp(); } - public Index(String type, String id, long seqNo, byte[] source) { - this(type, id, seqNo, Versions.MATCH_ANY, VersionType.INTERNAL, source, null, -1); + public Index(String type, String id, long seqNo, long primaryTerm, byte[] source) { + this(type, id, seqNo, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL, source, null, -1); } - public Index(String type, String id, long seqNo, long version, VersionType versionType, byte[] source, String routing, - long autoGeneratedIdTimestamp) { + public Index(String type, String id, long seqNo, long primaryTerm, long version, VersionType versionType, + byte[] source, String routing, long autoGeneratedIdTimestamp) { this.type = type; this.id = id; this.source = new BytesArray(source); this.seqNo = seqNo; - this.primaryTerm = 0; + this.primaryTerm = primaryTerm; this.version = version; this.versionType = versionType; this.routing = routing; @@ -1159,8 +1168,8 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } /** utility for testing */ - public Delete(String type, String id, long seqNo, Term uid) { - this(type, id, uid, seqNo, 0, Versions.MATCH_ANY, VersionType.INTERNAL); + public Delete(String type, String id, long seqNo, long primaryTerm, Term uid) { + this(type, id, uid, seqNo, primaryTerm, Versions.MATCH_ANY, VersionType.INTERNAL); } public Delete(String type, String id, Term uid, long seqNo, long primaryTerm, long version, VersionType versionType) { @@ -1364,10 +1373,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC } - private static void verifyChecksum(BufferedChecksumStreamInput in) throws IOException { + static void verifyChecksum(BufferedChecksumStreamInput in) throws IOException { // This absolutely must come first, or else reading the checksum becomes part of the checksum long expectedChecksum = in.getChecksum(); - long readChecksum = in.readInt() & 0xFFFF_FFFFL; + long readChecksum = Integer.toUnsignedLong(in.readInt()); if (readChecksum != expectedChecksum) { throw new TranslogCorruptedException("translog stream is corrupted, expected: 0x" + Long.toHexString(expectedChecksum) + ", got: 0x" + Long.toHexString(readChecksum)); @@ -1651,10 +1660,10 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC private static Checkpoint readCheckpoint(Path location, String expectedTranslogUUID) throws IOException { final Checkpoint checkpoint = readCheckpoint(location); - // We need to open at least translog reader to validate the translogUUID. + // We need to open at least one translog header to validate the translogUUID. 
final Path translogFile = location.resolve(getFilename(checkpoint.generation)); - try (TranslogReader reader = openReader(translogFile, checkpoint, expectedTranslogUUID)) { - + try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) { + TranslogHeader.read(expectedTranslogUUID, translogFile, channel); } catch (TranslogCorruptedException ex) { throw ex; // just bubble up. } catch (Exception ex) { @@ -1693,13 +1702,14 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC return readers; } - public static String createEmptyTranslog(final Path location, final long initialGlobalCheckpoint, final ShardId shardId) - throws IOException { + public static String createEmptyTranslog(final Path location, final long initialGlobalCheckpoint, + final ShardId shardId, final long primaryTerm) throws IOException { final ChannelFactory channelFactory = FileChannel::open; - return createEmptyTranslog(location, initialGlobalCheckpoint, shardId, channelFactory); + return createEmptyTranslog(location, initialGlobalCheckpoint, shardId, channelFactory, primaryTerm); } - static String createEmptyTranslog(Path location, long initialGlobalCheckpoint, ShardId shardId, ChannelFactory channelFactory) throws IOException { + static String createEmptyTranslog(Path location, long initialGlobalCheckpoint, ShardId shardId, + ChannelFactory channelFactory, long primaryTerm) throws IOException { IOUtils.rm(location); Files.createDirectories(location); final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, 1, initialGlobalCheckpoint, 1); @@ -1709,7 +1719,7 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC final String translogUUID = UUIDs.randomBase64UUID(); TranslogWriter writer = TranslogWriter.create(shardId, translogUUID, 1, location.resolve(getFilename(1)), channelFactory, new ByteSizeValue(10), 1, initialGlobalCheckpoint, - () -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); } + () -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); }, primaryTerm ); writer.close(); return translogUUID; diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java new file mode 100644 index 00000000000..0fde24d8bb4 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogHeader.java @@ -0,0 +1,195 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.translog; + +import org.apache.lucene.codecs.CodecUtil; +import org.apache.lucene.index.CorruptIndexException; +import org.apache.lucene.index.IndexFormatTooNewException; +import org.apache.lucene.index.IndexFormatTooOldException; +import org.apache.lucene.store.InputStreamDataInput; +import org.apache.lucene.store.OutputStreamDataOutput; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.Channels; +import org.elasticsearch.common.io.stream.InputStreamStreamInput; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.Path; + +/** + * Each translog file is started with a translog header then followed by translog operations. + */ +final class TranslogHeader { + public static final String TRANSLOG_CODEC = "translog"; + + public static final int VERSION_CHECKSUMS = 1; // pre-2.0 - unsupported + public static final int VERSION_CHECKPOINTS = 2; // added checkpoints + public static final int VERSION_PRIMARY_TERM = 3; // added primary term + public static final int CURRENT_VERSION = VERSION_PRIMARY_TERM; + + public static final long UNKNOWN_PRIMARY_TERM = 0L; + + private final String translogUUID; + private final long primaryTerm; + private final int headerSizeInBytes; + + /** + * Creates a new translog header with the given uuid and primary term. + * + * @param translogUUID this UUID is used to prevent accidental recovery from a transaction log that belongs to a + * different engine + * @param primaryTerm the primary term of the owning index shard when creating (eg. rolling) this translog file. + * All operations' terms in this translog file are enforced to be at most this term. + */ + TranslogHeader(String translogUUID, long primaryTerm) { + this(translogUUID, primaryTerm, headerSizeInBytes(translogUUID)); + assert primaryTerm >= 0 : "Primary term must be non-negative; term [" + primaryTerm + "]"; + } + + private TranslogHeader(String translogUUID, long primaryTerm, int headerSizeInBytes) { + this.translogUUID = translogUUID; + this.primaryTerm = primaryTerm; + this.headerSizeInBytes = headerSizeInBytes; + } + + public String getTranslogUUID() { + return translogUUID; + } + + /** + * Returns the primary term stored in this translog header. + * All operations in a translog file are expected to have their primary terms at most this term. + */ + public long getPrimaryTerm() { + return primaryTerm; + } + + /** + * Returns the header size in bytes. This value can be used as the offset of the first translog operation. + * See {@link BaseTranslogReader#getFirstOperationOffset()} + */ + public int sizeInBytes() { + return headerSizeInBytes; + } + + static int headerSizeInBytes(String translogUUID) { + return headerSizeInBytes(CURRENT_VERSION, new BytesRef(translogUUID).length); + } + + private static int headerSizeInBytes(int version, int uuidLength) { + int size = CodecUtil.headerLength(TRANSLOG_CODEC); + size += Integer.BYTES + uuidLength; // uuid + if (version >= VERSION_PRIMARY_TERM) { + size += Long.BYTES; // primary term + size += Integer.BYTES; // checksum + } + return size; + } + + /** + * Read a translog header from the given path and file channel + */ + static TranslogHeader read(final String translogUUID, final Path path, final FileChannel channel) throws IOException { + // This input is intentionally not closed because closing it will close the FileChannel. 
+ final BufferedChecksumStreamInput in = + new BufferedChecksumStreamInput(new InputStreamStreamInput(java.nio.channels.Channels.newInputStream(channel), channel.size())); + final int version; + try { + version = CodecUtil.checkHeader(new InputStreamDataInput(in), TRANSLOG_CODEC, VERSION_CHECKSUMS, VERSION_PRIMARY_TERM); + } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) { + tryReportOldVersionError(path, channel); + throw new TranslogCorruptedException("Translog header corrupted. path:" + path, e); + } + if (version == VERSION_CHECKSUMS) { + throw new IllegalStateException("pre-2.0 translog found [" + path + "]"); + } + // Read the translogUUID + final int uuidLen = in.readInt(); + if (uuidLen > channel.size()) { + throw new TranslogCorruptedException("uuid length can't be larger than the translog"); + } + final BytesRef uuid = new BytesRef(uuidLen); + uuid.length = uuidLen; + in.read(uuid.bytes, uuid.offset, uuid.length); + final BytesRef expectedUUID = new BytesRef(translogUUID); + if (uuid.bytesEquals(expectedUUID) == false) { + throw new TranslogCorruptedException("expected shard UUID " + expectedUUID + " but got: " + uuid + + " this translog file belongs to a different translog. path:" + path); + } + // Read the primary term + final long primaryTerm; + if (version == VERSION_PRIMARY_TERM) { + primaryTerm = in.readLong(); + assert primaryTerm >= 0 : "Primary term must be non-negative [" + primaryTerm + "]; translog path [" + path + "]"; + } else { + assert version == VERSION_CHECKPOINTS : "Unknown header version [" + version + "]"; + primaryTerm = UNKNOWN_PRIMARY_TERM; + } + // Verify the checksum + if (version >= VERSION_PRIMARY_TERM) { + Translog.verifyChecksum(in); + } + final int headerSizeInBytes = headerSizeInBytes(version, uuid.length); + assert channel.position() == headerSizeInBytes : + "Header is not fully read; header size [" + headerSizeInBytes + "], position [" + channel.position() + "]"; + return new TranslogHeader(translogUUID, primaryTerm, headerSizeInBytes); + } + + private static void tryReportOldVersionError(final Path path, final FileChannel channel) throws IOException { + // Lucene's CodecUtil writes a magic number of 0x3FD76C17 with the header, in binary this looks like: + // binary: 0011 1111 1101 0111 0110 1100 0001 0111 + // hex : 3 f d 7 6 c 1 7 + // + // With version 0 of the translog, the first byte is the Operation.Type, which will always be between 0-4, + // so we know if we grab the first byte, it can be: + // 0x3f => Lucene's magic number, so we can assume it's version 1 or later + // 0x00 => version 0 of the translog + final byte b1 = Channels.readFromFileChannel(channel, 0, 1)[0]; + if (b1 == 0x3f) { // LUCENE_CODEC_HEADER_BYTE + throw new TranslogCorruptedException("translog looks like version 1 or later, but has corrupted header. path:" + path); + } else if (b1 == 0x00) { // UNVERSIONED_TRANSLOG_HEADER_BYTE + throw new IllegalStateException("pre-1.4 translog found [" + path + "]"); + } + } + + /** + * Writes this header with the latest format into the file channel + */ + void write(final FileChannel channel) throws IOException { + // This output is intentionally not closed because closing it will close the FileChannel. 
+ @SuppressWarnings({"IOResourceOpenedButNotSafelyClosed", "resource"}) + final BufferedChecksumStreamOutput out = new BufferedChecksumStreamOutput( + new OutputStreamStreamOutput(java.nio.channels.Channels.newOutputStream(channel))); + CodecUtil.writeHeader(new OutputStreamDataOutput(out), TRANSLOG_CODEC, CURRENT_VERSION); + // Write uuid + final BytesRef uuid = new BytesRef(translogUUID); + out.writeInt(uuid.length); + out.writeBytes(uuid.bytes, uuid.offset, uuid.length); + // Write primary term + out.writeLong(primaryTerm); + // Checksum header + out.writeInt((int) out.getChecksum()); + out.flush(); + channel.force(true); + assert channel.position() == headerSizeInBytes : + "Header is not fully written; header size [" + headerSizeInBytes + "], channel position [" + channel.position() + "]"; + } +} diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java index b88037c32fd..29e30bd25dd 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogReader.java @@ -19,15 +19,8 @@ package org.elasticsearch.index.translog; -import org.apache.lucene.codecs.CodecUtil; -import org.apache.lucene.index.CorruptIndexException; -import org.apache.lucene.index.IndexFormatTooNewException; -import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.AlreadyClosedException; -import org.apache.lucene.store.InputStreamDataInput; -import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.io.Channels; -import org.elasticsearch.common.io.stream.InputStreamStreamInput; import java.io.Closeable; import java.io.EOFException; @@ -41,10 +34,6 @@ import java.util.concurrent.atomic.AtomicBoolean; * an immutable translog filereader */ public class TranslogReader extends BaseTranslogReader implements Closeable { - - private static final byte LUCENE_CODEC_HEADER_BYTE = 0x3f; - private static final byte UNVERSIONED_TRANSLOG_HEADER_BYTE = 0x00; - protected final long length; private final int totalOperations; private final Checkpoint checkpoint; @@ -53,13 +42,13 @@ public class TranslogReader extends BaseTranslogReader implements Closeable { /** * Create a translog writer against the specified translog file channel. 
* - * @param checkpoint the translog checkpoint - * @param channel the translog file channel to open a translog reader against - * @param path the path to the translog - * @param firstOperationOffset the offset to the first operation + * @param checkpoint the translog checkpoint + * @param channel the translog file channel to open a translog reader against + * @param path the path to the translog + * @param header the header of the translog file */ - TranslogReader(final Checkpoint checkpoint, final FileChannel channel, final Path path, final long firstOperationOffset) { - super(checkpoint.generation, channel, path, firstOperationOffset); + TranslogReader(final Checkpoint checkpoint, final FileChannel channel, final Path path, final TranslogHeader header) { + super(checkpoint.generation, channel, path, header); this.length = checkpoint.offset; this.totalOperations = checkpoint.numOps; this.checkpoint = checkpoint; @@ -77,75 +66,8 @@ public class TranslogReader extends BaseTranslogReader implements Closeable { */ public static TranslogReader open( final FileChannel channel, final Path path, final Checkpoint checkpoint, final String translogUUID) throws IOException { - - try { - InputStreamStreamInput headerStream = new InputStreamStreamInput(java.nio.channels.Channels.newInputStream(channel), - channel.size()); // don't close - // Lucene's CodecUtil writes a magic number of 0x3FD76C17 with the - // header, in binary this looks like: - // - // binary: 0011 1111 1101 0111 0110 1100 0001 0111 - // hex : 3 f d 7 6 c 1 7 - // - // With version 0 of the translog, the first byte is the - // Operation.Type, which will always be between 0-4, so we know if - // we grab the first byte, it can be: - // 0x3f => Lucene's magic number, so we can assume it's version 1 or later - // 0x00 => version 0 of the translog - // - // otherwise the first byte of the translog is corrupted and we - // should bail - byte b1 = headerStream.readByte(); - if (b1 == LUCENE_CODEC_HEADER_BYTE) { - // Read 3 more bytes, meaning a whole integer has been read - byte b2 = headerStream.readByte(); - byte b3 = headerStream.readByte(); - byte b4 = headerStream.readByte(); - // Convert the 4 bytes that were read into an integer - int header = ((b1 & 0xFF) << 24) + ((b2 & 0xFF) << 16) + ((b3 & 0xFF) << 8) + ((b4 & 0xFF) << 0); - // We confirm CodecUtil's CODEC_MAGIC number (0x3FD76C17) - // ourselves here, because it allows us to read the first - // byte separately - if (header != CodecUtil.CODEC_MAGIC) { - throw new TranslogCorruptedException("translog looks like version 1 or later, but has corrupted header. 
path:" + path); - } - // Confirm the rest of the header using CodecUtil, extracting - // the translog version - int version = CodecUtil.checkHeaderNoMagic(new InputStreamDataInput(headerStream), TranslogWriter.TRANSLOG_CODEC, 1, Integer.MAX_VALUE); - switch (version) { - case TranslogWriter.VERSION_CHECKSUMS: - throw new IllegalStateException("pre-2.0 translog found [" + path + "]"); - case TranslogWriter.VERSION_CHECKPOINTS: - assert path.getFileName().toString().endsWith(Translog.TRANSLOG_FILE_SUFFIX) : "new file ends with old suffix: " + path; - assert checkpoint.numOps >= 0 : "expected at least 0 operation but got: " + checkpoint.numOps; - assert checkpoint.offset <= channel.size() : "checkpoint is inconsistent with channel length: " + channel.size() + " " + checkpoint; - int len = headerStream.readInt(); - if (len > channel.size()) { - throw new TranslogCorruptedException("uuid length can't be larger than the translog"); - } - BytesRef ref = new BytesRef(len); - ref.length = len; - headerStream.read(ref.bytes, ref.offset, ref.length); - BytesRef uuidBytes = new BytesRef(translogUUID); - if (uuidBytes.bytesEquals(ref) == false) { - throw new TranslogCorruptedException("expected shard UUID " + uuidBytes + " but got: " + ref + - " this translog file belongs to a different translog. path:" + path); - } - final long firstOperationOffset; - firstOperationOffset = ref.length + CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC) + Integer.BYTES; - return new TranslogReader(checkpoint, channel, path, firstOperationOffset); - - default: - throw new TranslogCorruptedException("No known translog stream version: " + version + " path:" + path); - } - } else if (b1 == UNVERSIONED_TRANSLOG_HEADER_BYTE) { - throw new IllegalStateException("pre-1.4 translog found [" + path + "]"); - } else { - throw new TranslogCorruptedException("Invalid first byte in translog file, got: " + Long.toHexString(b1) + ", expected 0x00 or 0x3f. path:" + path); - } - } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException e) { - throw new TranslogCorruptedException("Translog header corrupted. path:" + path, e); - } + final TranslogHeader header = TranslogHeader.read(translogUUID, path, channel); + return new TranslogReader(checkpoint, channel, path, header); } public long sizeInBytes() { @@ -168,8 +90,8 @@ public class TranslogReader extends BaseTranslogReader implements Closeable { if (position >= length) { throw new EOFException("read requested past EOF. pos [" + position + "] end: [" + length + "]"); } - if (position < firstOperationOffset) { - throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" + firstOperationOffset + "]"); + if (position < getFirstOperationOffset()) { + throw new IOException("read requested before position of first ops. pos [" + position + "] first op on: [" + getFirstOperationOffset() + "]"); } Channels.readFromFileChannelWithEofException(channel, position, buffer); } diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java index 5f6d14e192e..a9667203532 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java +++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogSnapshot.java @@ -39,14 +39,14 @@ final class TranslogSnapshot extends BaseTranslogReader { * Create a snapshot of translog file channel. 
diff --git a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java
index 3bf70c06531..cae65788865 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/TranslogWriter.java
@@ -19,10 +19,8 @@

 package org.elasticsearch.index.translog;

-import org.apache.lucene.codecs.CodecUtil;
 import org.apache.lucene.store.AlreadyClosedException;
 import org.apache.lucene.store.OutputStreamDataOutput;
-import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.Assertions;
 import org.elasticsearch.common.bytes.BytesArray;
@@ -47,12 +45,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.function.LongSupplier;

 public class TranslogWriter extends BaseTranslogReader implements Closeable {
-
-    public static final String TRANSLOG_CODEC = "translog";
-    public static final int VERSION_CHECKSUMS = 1;
-    public static final int VERSION_CHECKPOINTS = 2; // since 2.0 we have checkpoints?
-    public static final int VERSION = VERSION_CHECKPOINTS;
-
     private final ShardId shardId;
     private final ChannelFactory channelFactory;
     // the last checkpoint that was written when the translog was last synced
@@ -85,10 +77,10 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
             final FileChannel channel,
             final Path path,
             final ByteSizeValue bufferSize,
-            final LongSupplier globalCheckpointSupplier, LongSupplier minTranslogGenerationSupplier) throws IOException {
-        super(initialCheckpoint.generation, channel, path, channel.position());
+            final LongSupplier globalCheckpointSupplier, LongSupplier minTranslogGenerationSupplier, TranslogHeader header) throws IOException {
+        super(initialCheckpoint.generation, channel, path, header);
         assert initialCheckpoint.offset == channel.position() :
-            "initial checkpoint offset [" + initialCheckpoint.offset + "] is different than current channel poistion ["
+            "initial checkpoint offset [" + initialCheckpoint.offset + "] is different than current channel position ["
                 + channel.position() + "]";
         this.shardId = shardId;
         this.channelFactory = channelFactory;
@@ -104,34 +96,16 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
         this.seenSequenceNumbers = Assertions.ENABLED ? new HashMap<>() : null;
     }

-    static int getHeaderLength(String translogUUID) {
-        return getHeaderLength(new BytesRef(translogUUID).length);
-    }
-
-    static int getHeaderLength(int uuidLength) {
-        return CodecUtil.headerLength(TRANSLOG_CODEC) + uuidLength + Integer.BYTES;
-    }
-
-    static void writeHeader(OutputStreamDataOutput out, BytesRef ref) throws IOException {
-        CodecUtil.writeHeader(out, TRANSLOG_CODEC, VERSION);
-        out.writeInt(ref.length);
-        out.writeBytes(ref.bytes, ref.offset, ref.length);
-    }
-
     public static TranslogWriter create(ShardId shardId, String translogUUID, long fileGeneration, Path file, ChannelFactory channelFactory,
                                         ByteSizeValue bufferSize, final long initialMinTranslogGen, long initialGlobalCheckpoint,
-                                        final LongSupplier globalCheckpointSupplier, final LongSupplier minTranslogGenerationSupplier)
+                                        final LongSupplier globalCheckpointSupplier, final LongSupplier minTranslogGenerationSupplier,
+                                        final long primaryTerm)
         throws IOException {
-        final BytesRef ref = new BytesRef(translogUUID);
-        final int firstOperationOffset = getHeaderLength(ref.length);
         final FileChannel channel = channelFactory.open(file);
         try {
-            // This OutputStreamDataOutput is intentionally not closed because
-            // closing it will close the FileChannel
-            final OutputStreamDataOutput out = new OutputStreamDataOutput(java.nio.channels.Channels.newOutputStream(channel));
-            writeHeader(out, ref);
-            channel.force(true);
-            final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(firstOperationOffset, fileGeneration,
+            final TranslogHeader header = new TranslogHeader(translogUUID, primaryTerm);
+            header.write(channel);
+            final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(header.sizeInBytes(), fileGeneration,
                 initialGlobalCheckpoint, initialMinTranslogGen);
             writeCheckpoint(channelFactory, file.getParent(), checkpoint);
             final LongSupplier writerGlobalCheckpointSupplier;
@@ -146,7 +120,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
                 writerGlobalCheckpointSupplier = globalCheckpointSupplier;
             }
             return new TranslogWriter(channelFactory, shardId, checkpoint, channel, file, bufferSize,
-                writerGlobalCheckpointSupplier, minTranslogGenerationSupplier);
+                writerGlobalCheckpointSupplier, minTranslogGenerationSupplier, header);
         } catch (Exception exception) {
             // if we fail to bake the file-generation into the checkpoint we stick with the file and once we recover and that
             // file exists we remove it. We only apply this logic to the checkpoint.generation+1 any other file with a higher generation is an error condition
@@ -295,7 +269,7 @@ public class TranslogWriter extends BaseTranslogReader implements Closeable {
             throw ex;
         }
         if (closed.compareAndSet(false, true)) {
-            return new TranslogReader(getLastSyncedCheckpoint(), channel, path, getFirstOperationOffset());
+            return new TranslogReader(getLastSyncedCheckpoint(), channel, path, header);
         } else {
             throw new AlreadyClosedException("translog [" + getGeneration() + "] is already closed (path [" + path + "]", tragedy);
         }
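
On the write side, the rewritten create(...) relies on header.write(channel) leaving the channel positioned exactly at header.sizeInBytes(), so seeding the empty checkpoint with that value keeps the constructor's offset == channel.position() assertion true. A write-side counterpart to the read sketch above, under the same layout assumptions (again not the real TranslogHeader):

```java
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.channels.Channels;
import java.nio.channels.FileChannel;
import java.nio.charset.StandardCharsets;

final class TranslogHeaderWriteSketch {
    static final int CODEC_MAGIC = 0x3FD76C17;

    // Writes the header and returns the number of bytes written, which in the new
    // create(...) becomes the first-operation offset recorded in the empty checkpoint.
    static int write(FileChannel channel, String translogUUID, long primaryTerm) throws IOException {
        byte[] uuid = translogUUID.getBytes(StandardCharsets.UTF_8);
        DataOutputStream out = new DataOutputStream(Channels.newOutputStream(channel)); // don't close: would close the channel
        out.writeInt(CODEC_MAGIC);
        // ... codec name and version elided ...
        out.writeInt(uuid.length);
        out.write(uuid);
        out.writeLong(primaryTerm);
        out.flush();
        channel.force(true); // fsync so the header survives a crash
        return 4 + 4 + uuid.length + 8; // bytes written in this simplified layout
    }
}
```

Since nothing else has been written to the freshly created file at that point, the channel position and the checkpoint offset necessarily agree.
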
diff --git a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java
index 164d8fee956..b8bd93e05a6 100644
--- a/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java
+++ b/server/src/main/java/org/elasticsearch/index/translog/TruncateTranslogCommand.java
@@ -33,7 +33,6 @@ import org.apache.lucene.store.Lock;
 import org.apache.lucene.store.LockObtainFailedException;
 import org.apache.lucene.store.NativeFSLockFactory;
 import org.apache.lucene.store.OutputStreamDataOutput;
-import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.cli.EnvironmentAwareCommand;
@@ -213,13 +212,11 @@ public class TruncateTranslogCommand extends EnvironmentAwareCommand {
      * Write a translog containing the given translog UUID to the given location. Returns the number of bytes written.
      */
     public static int writeEmptyTranslog(Path filename, String translogUUID) throws IOException {
-        final BytesRef translogRef = new BytesRef(translogUUID);
-        try (FileChannel fc = FileChannel.open(filename, StandardOpenOption.WRITE, StandardOpenOption.READ, StandardOpenOption.CREATE_NEW);
-             OutputStreamDataOutput out = new OutputStreamDataOutput(Channels.newOutputStream(fc))) {
-            TranslogWriter.writeHeader(out, translogRef);
-            fc.force(true);
+        try (FileChannel fc = FileChannel.open(filename, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW)) {
+            TranslogHeader header = new TranslogHeader(translogUUID, TranslogHeader.UNKNOWN_PRIMARY_TERM);
+            header.write(fc);
+            return header.sizeInBytes();
         }
-        return TranslogWriter.getHeaderLength(translogRef.length);
     }

     /** Show a warning about deleting files, asking for a confirmation if {@code batchMode} is false */
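
Note the term stamped by the truncate tool: TranslogHeader.UNKNOWN_PRIMARY_TERM. The tool runs offline against a broken shard, with no cluster state to consult, so no authoritative primary term is available. A hedged usage sketch, assuming the same package and an illustrative generation file name:

```java
import java.io.IOException;
import java.nio.file.Path;

final class TruncateSketch {
    // translogDir and the "translog-1.tlog" name are illustrative, not taken from the tool.
    static int rebuildEmptyTranslog(Path translogDir, String translogUUID) throws IOException {
        Path translogFile = translogDir.resolve("translog-1.tlog");
        // returns the number of bytes written, i.e. the header size
        return TruncateTranslogCommand.writeEmptyTranslog(translogFile, translogUUID);
    }
}
```
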
diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
index eb0db395a15..244bb462df6 100644
--- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
+++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryTarget.java
@@ -441,8 +441,8 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
                 store.ensureIndexHasHistoryUUID();
             }
             // TODO: Assign the global checkpoint to the max_seqno of the safe commit if the index version >= 6.2
-            final String translogUUID =
-                Translog.createEmptyTranslog(indexShard.shardPath().resolveTranslog(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId);
+            final String translogUUID = Translog.createEmptyTranslog(
+                indexShard.shardPath().resolveTranslog(), SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, indexShard.getPrimaryTerm());
             store.associateIndexWithNewTranslog(translogUUID);
         } catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) {
             // this is a fatal exception at this stage.
diff --git a/server/src/test/java/org/elasticsearch/action/resync/ResyncReplicationRequestTests.java b/server/src/test/java/org/elasticsearch/action/resync/ResyncReplicationRequestTests.java
index 8c834770b08..d5ad3941a5e 100644
--- a/server/src/test/java/org/elasticsearch/action/resync/ResyncReplicationRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/resync/ResyncReplicationRequestTests.java
@@ -37,7 +37,8 @@ public class ResyncReplicationRequestTests extends ESTestCase {

     public void testSerialization() throws IOException {
         final byte[] bytes = "{}".getBytes(Charset.forName("UTF-8"));
-        final Translog.Index index = new Translog.Index("type", "id", 0, Versions.MATCH_ANY, VersionType.INTERNAL, bytes, null, -1);
+        final Translog.Index index = new Translog.Index("type", "id", 0, randomNonNegativeLong(),
+            Versions.MATCH_ANY, VersionType.INTERNAL, bytes, null, -1);
         final ShardId shardId = new ShardId(new Index("index", "uuid"), 0);
         final ResyncReplicationRequest before = new ResyncReplicationRequest(shardId, new Translog.Operation[]{index});
diff --git a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java
index 6af1a79249e..008b05f6a1e 100644
--- a/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java
+++ b/server/src/test/java/org/elasticsearch/index/IndexModuleTests.java
@@ -242,7 +242,7 @@ public class IndexModuleTests extends ESTestCase {
         assertSame(listener, indexService.getIndexOperationListeners().get(1));

         ParsedDocument doc = InternalEngineTests.createParsedDoc("1", null);
-        Engine.Index index = new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), doc);
+        Engine.Index index = new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), randomNonNegativeLong(), doc);
         ShardId shardId = new ShardId(new Index("foo", "bar"), 0);
         for (IndexingOperationListener l : indexService.getIndexOperationListeners()) {
             l.preIndex(shardId, index);
diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
index 9cdc68444ea..60913c644ea 100644
--- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
+++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java
@@ -235,7 +235,7 @@ public class InternalEngineTests extends EngineTestCase {
             assertEquals(2, searcher.reader().numDocs());
         }
         assertFalse("safe access should NOT be required last indexing round was only append only", engine.isSafeAccessRequired());
-        engine.delete(new Engine.Delete(operation.type(), operation.id(), operation.uid()));
+        engine.delete(new Engine.Delete(operation.type(), operation.id(), operation.uid(), primaryTerm.get()));
         assertTrue("safe access should be required", engine.isSafeAccessRequired());
         engine.refresh("test");
         assertTrue("safe access should be required", engine.isSafeAccessRequired());
@@ -317,7 +317,7 @@ public class InternalEngineTests extends EngineTestCase {
         assertThat(segments.get(1).isCompound(), equalTo(true));

-        engine.delete(new Engine.Delete("test", "1", newUid(doc)));
+        engine.delete(new Engine.Delete("test", "1", newUid(doc), primaryTerm.get()));
         engine.refresh("test");

         segments = engine.segments(false);
@@ -890,7 +890,7 @@ public class InternalEngineTests extends EngineTestCase {
         searchResult.close();

         // now delete
-        engine.delete(new Engine.Delete("test", "1", newUid(doc)));
+        engine.delete(new Engine.Delete("test", "1", newUid(doc), primaryTerm.get()));

         // its not deleted yet
         searchResult = engine.acquireSearcher("test");
@@ -917,7 +917,7 @@ public class InternalEngineTests extends EngineTestCase {
         document = testDocumentWithTextField();
         document.add(new Field(SourceFieldMapper.NAME, BytesReference.toBytes(B_1), SourceFieldMapper.Defaults.FIELD_TYPE));
         doc = testParsedDocument("1", null, document, B_1, null);
-        engine.index(new Engine.Index(newUid(doc), doc, Versions.MATCH_DELETED));
+        engine.index(new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED));

         // its not there...
         searchResult = engine.acquireSearcher("test");
@@ -994,7 +994,7 @@ public class InternalEngineTests extends EngineTestCase {
         // don't release the search result yet...

         // delete, refresh and do a new search, it should not be there
-        engine.delete(new Engine.Delete("test", "1", newUid(doc)));
+        engine.delete(new Engine.Delete("test", "1", newUid(doc), primaryTerm.get()));
         engine.refresh("test");
         Engine.Searcher updateSearchResult = engine.acquireSearcher("test");
         MatcherAssert.assertThat(updateSearchResult, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(0));
@@ -1113,7 +1113,7 @@ public class InternalEngineTests extends EngineTestCase {
             engine.index(doc4);
             assertEquals(engine.getLastWriteNanos(), doc4.startTime());
         } else {
-            Engine.Delete delete = new Engine.Delete(doc1.type(), doc1.id(), doc1.uid());
+            Engine.Delete delete = new Engine.Delete(doc1.type(), doc1.id(), doc1.uid(), primaryTerm.get());
             engine.delete(delete);
             assertEquals(engine.getLastWriteNanos(), delete.startTime());
         }
@@ -1147,7 +1147,7 @@ public class InternalEngineTests extends EngineTestCase {
         }
         if (randomBoolean()) {
             final String translogUUID = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(),
-                SequenceNumbers.UNASSIGNED_SEQ_NO, shardId);
+                SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, primaryTerm.get());
             store.associateIndexWithNewTranslog(translogUUID);
         }
         trimUnsafeCommits(config);
@@ -1177,7 +1177,7 @@ public class InternalEngineTests extends EngineTestCase {

     public void testVersioningNewCreate() throws IOException {
         ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null);
-        Engine.Index create = new Engine.Index(newUid(doc), doc, Versions.MATCH_DELETED);
+        Engine.Index create = new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED);
         Engine.IndexResult indexResult = engine.index(create);
         assertThat(indexResult.getVersion(), equalTo(1L));

@@ -1189,7 +1189,7 @@ public class InternalEngineTests extends EngineTestCase {

     public void testReplicatedVersioningWithFlush() throws IOException {
         ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null);
-        Engine.Index create = new Engine.Index(newUid(doc), doc, Versions.MATCH_DELETED);
+        Engine.Index create = new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED);
         Engine.IndexResult indexResult = engine.index(create);
         assertThat(indexResult.getVersion(), equalTo(1L));
         assertTrue(indexResult.isCreated());
@@ -1208,7 +1208,7 @@ public class InternalEngineTests extends EngineTestCase {
             replicaEngine.flush();
         }

-        Engine.Index update = new Engine.Index(newUid(doc), doc, 1);
+        Engine.Index update = new Engine.Index(newUid(doc), primaryTerm.get(), doc, 1);
         Engine.IndexResult updateResult = engine.index(update);
         assertThat(updateResult.getVersion(), equalTo(2L));
         assertFalse(updateResult.isCreated());
@@ -1237,14 +1237,14 @@ public class InternalEngineTests extends EngineTestCase {
         final BiFunction<String, Engine.SearcherScope, Engine.Searcher> searcherFactory = engine::acquireSearcher;
         ParsedDocument doc = testParsedDocument("1", null, testDocument(), B_1, null);
-        Engine.Index create = new Engine.Index(newUid(doc), doc, Versions.MATCH_DELETED);
+        Engine.Index create = new Engine.Index(newUid(doc), primaryTerm.get(), doc, Versions.MATCH_DELETED);
         Engine.IndexResult indexResult = engine.index(create);
         assertThat(indexResult.getVersion(), equalTo(1L));
         try (Engine.GetResult get = engine.get(new Engine.Get(true, false, doc.type(), doc.id(), create.uid()), searcherFactory)) {
             assertEquals(1, get.version());
         }

-        Engine.Index update_1 = new Engine.Index(newUid(doc), doc, 1);
+        Engine.Index update_1 = new Engine.Index(newUid(doc), primaryTerm.get(), doc, 1);
         Engine.IndexResult update_1_result = engine.index(update_1);
         assertThat(update_1_result.getVersion(), equalTo(2L));
@@ -1252,7 +1252,7 @@ public class InternalEngineTests extends EngineTestCase {
             assertEquals(2, get.version());
         }

-        Engine.Index update_2 = new Engine.Index(newUid(doc), doc, 2);
+        Engine.Index update_2 = new Engine.Index(newUid(doc), primaryTerm.get(), doc, 2);
         Engine.IndexResult update_2_result = engine.index(update_2);
         assertThat(update_2_result.getVersion(), equalTo(3L));
@@ -1293,7 +1293,7 @@ public class InternalEngineTests extends EngineTestCase {
         ParsedDocument doc = testParsedDocument(Integer.toString(0), null, testDocument(), B_1, null);
         Engine.Index index = indexForDoc(doc);

-        engine.delete(new Engine.Delete(index.type(), index.id(), index.uid()));
+        engine.delete(new Engine.Delete(index.type(), index.id(), index.uid(), primaryTerm.get()));
         engine.forceMerge(true, 10, true, false, false); //expunge deletes

         engine.refresh("test");
@@ -1305,7 +1305,7 @@ public class InternalEngineTests extends EngineTestCase {
         doc = testParsedDocument(Integer.toString(1), null, testDocument(), B_1, null);
         index = indexForDoc(doc);

-        engine.delete(new Engine.Delete(index.type(), index.id(), index.uid()));
+        engine.delete(new Engine.Delete(index.type(), index.id(), index.uid(), primaryTerm.get()));
         engine.forceMerge(true, 10, false, false, false); //expunge deletes
         engine.refresh("test");
         assertEquals(engine.segments(true).size(), 1);
@@ -1892,7 +1892,7 @@ public class InternalEngineTests extends EngineTestCase {
         indexResult = engine.index(index);
         assertFalse(indexResult.isCreated());

-        engine.delete(new Engine.Delete("doc", "1", newUid(doc)));
+        engine.delete(new Engine.Delete("doc", "1", newUid(doc), primaryTerm.get()));

         index = indexForDoc(doc);
         indexResult = engine.index(index);
@@ -2368,7 +2368,7 @@ public class InternalEngineTests extends EngineTestCase {
         {
             store.createEmpty();
             final String translogUUID =
-                Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
+                Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
             store.associateIndexWithNewTranslog(translogUUID);
             ParsedDocument doc = testParsedDocument(Integer.toString(0), null, testDocument(), new BytesArray("{}"), null);
             Engine.Index firstIndexRequest = new Engine.Index(newUid(doc), doc, SequenceNumbers.UNASSIGNED_SEQ_NO, 0,
@@ -2409,7 +2409,7 @@ public class InternalEngineTests extends EngineTestCase {
         // open index with new tlog
         {
             final String translogUUID =
-                Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
+                Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
             store.associateIndexWithNewTranslog(translogUUID);
             trimUnsafeCommits(config);
             try (InternalEngine engine = new InternalEngine(config)) {
@@ -2444,7 +2444,8 @@ public class InternalEngineTests extends EngineTestCase {
         // test that we can force start the engine , even if the translog is missing.
         engine.close();
         // fake a new translog, causing the engine to point to a missing one.
-        Translog translog = createTranslog();
+        final long primaryTerm = randomNonNegativeLong();
+        Translog translog = createTranslog(() -> primaryTerm);
         long id = translog.currentFileGeneration();
         translog.close();
         IOUtils.rm(translog.location().resolve(Translog.getFilename(id)));
@@ -2455,7 +2456,7 @@ public class InternalEngineTests extends EngineTestCase {
             // expected
         }
         // when a new translog is created it should be ok
-        final String translogUUID = Translog.createEmptyTranslog(primaryTranslogDir, SequenceNumbers.UNASSIGNED_SEQ_NO, shardId);
+        final String translogUUID = Translog.createEmptyTranslog(primaryTranslogDir, SequenceNumbers.UNASSIGNED_SEQ_NO, shardId, primaryTerm);
         store.associateIndexWithNewTranslog(translogUUID);
         EngineConfig config = config(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null);
         engine = new InternalEngine(config);
@@ -2521,7 +2522,7 @@ public class InternalEngineTests extends EngineTestCase {
         final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
         final LongSupplier globalCheckpointSupplier = () -> globalCheckpoint.get();
         store.createEmpty();
-        final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId);
+        final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId, primaryTerm.get());
         store.associateIndexWithNewTranslog(translogUUID);

         try (InternalEngine engine = new InternalEngine(config(indexSettings, store, translogPath, newMergePolicy(), null, null,
@@ -2659,7 +2660,7 @@ public class InternalEngineTests extends EngineTestCase {
         }
         parser = (TranslogHandler) engine.config().getTranslogRecoveryRunner();
         assertEquals(flush ? 1 : 2, parser.appliedOperations());
-        engine.delete(new Engine.Delete("test", Integer.toString(randomId), newUid(doc)));
+        engine.delete(new Engine.Delete("test", Integer.toString(randomId), newUid(doc), primaryTerm.get()));
         if (randomBoolean()) {
             engine.refresh("test");
         } else {
@@ -2685,11 +2686,11 @@ public class InternalEngineTests extends EngineTestCase {
         engine.close();

         final Path badTranslogLog = createTempDir();
-        final String badUUID = Translog.createEmptyTranslog(badTranslogLog, SequenceNumbers.NO_OPS_PERFORMED, shardId);
+        final String badUUID = Translog.createEmptyTranslog(badTranslogLog, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
         Translog translog = new Translog(
             new TranslogConfig(shardId, badTranslogLog, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE),
-            badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED);
-        translog.add(new Translog.Index("test", "SomeBogusId", 0, "{}".getBytes(Charset.forName("UTF-8"))));
+            badUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
+        translog.add(new Translog.Index("test", "SomeBogusId", 0, primaryTerm.get(), "{}".getBytes(Charset.forName("UTF-8"))));
         assertEquals(generation.translogFileGeneration, translog.currentFileGeneration());
         translog.close();
@@ -2703,7 +2704,7 @@ public class InternalEngineTests extends EngineTestCase {
             new CodecService(null, logger), config.getEventListener(), IndexSearcher.getDefaultQueryCache(),
             IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5),
             config.getExternalRefreshListener(), config.getInternalRefreshListener(), null, config.getTranslogRecoveryRunner(),
-            new NoneCircuitBreakerService(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO);
+            new NoneCircuitBreakerService(), () -> SequenceNumbers.UNASSIGNED_SEQ_NO, primaryTerm::get);
         try {
             InternalEngine internalEngine = new InternalEngine(brokenConfig);
             fail("translog belongs to a different engine");
@@ -2886,11 +2887,11 @@ public class InternalEngineTests extends EngineTestCase {
         final Engine.DeleteResult deleteResult;
         if (randomBoolean()) {
             throwingIndexWriter.get().setThrowFailure(() -> new IOException("simulated"));
-            deleteResult = engine.delete(new Engine.Delete("test", "1", newUid(doc1)));
+            deleteResult = engine.delete(new Engine.Delete("test", "1", newUid(doc1), primaryTerm.get()));
             assertThat(deleteResult.getFailure(), instanceOf(IOException.class));
         } else {
             throwingIndexWriter.get().setThrowFailure(() -> new IllegalArgumentException("simulated max token length"));
-            deleteResult = engine.delete(new Engine.Delete("test", "1", newUid(doc1)));
+            deleteResult = engine.delete(new Engine.Delete("test", "1", newUid(doc1), primaryTerm.get()));
             assertThat(deleteResult.getFailure(), instanceOf(IllegalArgumentException.class));
         }
@@ -2923,7 +2924,7 @@ public class InternalEngineTests extends EngineTestCase {
             if (randomBoolean()) {
                 engine.index(indexForDoc(doc1));
             } else {
-                engine.delete(new Engine.Delete("test", "", newUid(doc1)));
+                engine.delete(new Engine.Delete("test", "", newUid(doc1), primaryTerm.get()));
             }
             fail("engine should be closed");
         } catch (Exception e) {
@@ -3324,7 +3325,7 @@ public class InternalEngineTests extends EngineTestCase {
         }
         try (Store store = createStore(newFSDirectory(storeDir))) {
             if (randomBoolean() || true) {
-                final String translogUUID = Translog.createEmptyTranslog(translogDir, SequenceNumbers.NO_OPS_PERFORMED, shardId);
+                final String translogUUID = Translog.createEmptyTranslog(translogDir, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
                 store.associateIndexWithNewTranslog(translogUUID);
             }
             try (Engine engine = new InternalEngine(configSupplier.apply(store))) {
@@ -3478,7 +3479,7 @@ public class InternalEngineTests extends EngineTestCase {
         seqID = getSequenceID(engine, newGet(false, doc));
         logger.info("--> got seqID: {}", seqID);
         assertThat(seqID.v1(), equalTo(0L));
-        assertThat(seqID.v2(), equalTo(2L));
+        assertThat(seqID.v2(), equalTo(primaryTerm.get()));

         // Index the same document again
         document = testDocumentWithTextField();
@@ -3490,7 +3491,7 @@ public class InternalEngineTests extends EngineTestCase {
         seqID = getSequenceID(engine, newGet(false, doc));
         logger.info("--> got seqID: {}", seqID);
         assertThat(seqID.v1(), equalTo(1L));
-        assertThat(seqID.v2(), equalTo(2L));
+        assertThat(seqID.v2(), equalTo(primaryTerm.get()));

         // Index the same document for the third time, this time changing the primary term
         document = testDocumentWithTextField();
@@ -3704,13 +3705,12 @@ public class InternalEngineTests extends EngineTestCase {
                 }
             };
             noOpEngine.recoverFromTranslog();
-            final long primaryTerm = randomNonNegativeLong();
-            final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm);
+            final int gapsFilled = noOpEngine.fillSeqNoGaps(primaryTerm.get());
             final String reason = randomAlphaOfLength(16);
             noOpEngine.noOp(
                 new Engine.NoOp(
                     maxSeqNo + 1,
-                    primaryTerm,
+                    primaryTerm.get(),
                     randomFrom(PRIMARY, REPLICA, PEER_RECOVERY, LOCAL_TRANSLOG_RECOVERY),
                     System.nanoTime(),
                     reason));
@@ -3728,7 +3728,7 @@ public class InternalEngineTests extends EngineTestCase {
             assertThat(last, instanceOf(Translog.NoOp.class));
             final Translog.NoOp noOp = (Translog.NoOp) last;
             assertThat(noOp.seqNo(), equalTo((long) (maxSeqNo + 1)));
-            assertThat(noOp.primaryTerm(), equalTo(primaryTerm));
+            assertThat(noOp.primaryTerm(), equalTo(primaryTerm.get()));
             assertThat(noOp.reason(), equalTo(reason));
         } finally {
             IOUtils.close(noOpEngine);
@@ -3931,7 +3931,7 @@ public class InternalEngineTests extends EngineTestCase {
             if (operation.opType() == Translog.Operation.Type.NO_OP) {
                 assertEquals(2, operation.primaryTerm());
             } else {
-                assertEquals(1, operation.primaryTerm());
+                assertEquals(primaryTerm.get(), operation.primaryTerm());
             }
         }
@@ -4131,7 +4131,7 @@ public class InternalEngineTests extends EngineTestCase {
         store = createStore();
         final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
         store.createEmpty();
-        final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId);
+        final String translogUUID = Translog.createEmptyTranslog(translogPath, globalCheckpoint.get(), shardId, primaryTerm.get());
         store.associateIndexWithNewTranslog(translogUUID);
         final EngineConfig engineConfig = config(indexSettings, store, translogPath, NoMergePolicy.INSTANCE, null, null,
@@ -4201,7 +4201,7 @@ public class InternalEngineTests extends EngineTestCase {
             Engine.Index operation = appendOnlyPrimary(doc, false, 1);
             engine.index(operation);
             if (rarely()) {
-                engine.delete(new Engine.Delete(operation.type(), operation.id(), operation.uid()));
+                engine.delete(new Engine.Delete(operation.type(), operation.id(), operation.uid(), primaryTerm.get()));
                 numDeletes.incrementAndGet();
             } else {
                 doc = testParsedDocument(docID, null, testDocumentWithTextField("updated"),
@@ -4340,7 +4340,7 @@ public class InternalEngineTests extends EngineTestCase {
             engine.index(indexForDoc(doc));
         }
         assertThat("Not exceeded translog flush threshold yet", engine.shouldPeriodicallyFlush(), equalTo(false));
-        long flushThreshold = RandomNumbers.randomLongBetween(random(), 100,
+        long flushThreshold = RandomNumbers.randomLongBetween(random(), 120,
             engine.getTranslog().stats().getUncommittedSizeInBytes()- extraTranslogSizeInNewEngine);
         final IndexSettings indexSettings = engine.config().getIndexSettings();
         final IndexMetaData indexMetaData = IndexMetaData.builder(indexSettings.getIndexMetaData())
@@ -4382,7 +4382,7 @@ public class InternalEngineTests extends EngineTestCase {
     }

     public void testStressShouldPeriodicallyFlush() throws Exception {
-        final long flushThreshold = randomLongBetween(100, 5000);
+        final long flushThreshold = randomLongBetween(120, 5000);
         final long generationThreshold = randomLongBetween(1000, 5000);
         final IndexSettings indexSettings = engine.config().getIndexSettings();
         final IndexMetaData indexMetaData = IndexMetaData.builder(indexSettings.getIndexMetaData())
@@ -4423,7 +4423,7 @@ public class InternalEngineTests extends EngineTestCase {
             Versions.MATCH_ANY, VersionType.INTERNAL, Engine.Operation.Origin.PRIMARY, System.nanoTime(), 0, false);
         // first index an append only document and then delete it. such that we have it in the tombstones
         engine.index(doc);
-        engine.delete(new Engine.Delete(doc.type(), doc.id(), doc.uid()));
+        engine.delete(new Engine.Delete(doc.type(), doc.id(), doc.uid(), primaryTerm.get()));

         // now index more append only docs and refresh so we re-enable the optimization for unsafe version map
         ParsedDocument document1 = testParsedDocument(Integer.toString(1), null, testDocumentWithTextField(), SOURCE, null);
@@ -4559,7 +4559,7 @@ public class InternalEngineTests extends EngineTestCase {
                 if (randomBoolean()) {
                     engine.index(indexForDoc(parsedDocument));
                 } else {
                    engine.delete(new Engine.Delete(parsedDocument.type(), parsedDocument.id(), newUid(parsedDocument.id()), primaryTerm.get()));
                 }
             }
         }
diff --git a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
index 66e2a09750a..c7469f2432a 100644
--- a/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
+++ b/server/src/test/java/org/elasticsearch/index/replication/RecoveryDuringReplicationTests.java
@@ -185,7 +185,7 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
                 false, SourceToParse.source("index", "type", "replica", new BytesArray("{}"), XContentType.JSON),
                 mapping -> {});
-            shards.promoteReplicaToPrimary(promotedReplica);
+            shards.promoteReplicaToPrimary(promotedReplica).get();
             oldPrimary.close("demoted", randomBoolean());
             oldPrimary.store().close();
             shards.removeReplica(remainingReplica);
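
The added .get() suggests promoteReplicaToPrimary now returns a Future rather than applying the promotion synchronously; blocking on it keeps the promotion (which bumps the primary term and, per the production changes above, rolls the translog onto a header carrying the new term) from racing the old primary's close(). A trivial sketch of that reading, with the return type assumed:

```java
import java.util.concurrent.Future;

final class PromotionSketch {
    static void awaitPromotion(Future<?> promotion) throws Exception {
        promotion.get(); // the test proceeds only once the promotion has fully applied
    }
}
```

Here `promotion` stands for whatever handle `shards.promoteReplicaToPrimary(promotedReplica)` returns.
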
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
index cc241d4ad74..f7ee54b32ee 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardIT.java
@@ -335,7 +335,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
         assertFalse(shard.shouldPeriodicallyFlush());
         client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder()
             .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
-                new ByteSizeValue(160 /* size of the operation + two generations header&footer*/, ByteSizeUnit.BYTES)).build()).get();
+                new ByteSizeValue(190 /* size of the operation + two generations header&footer*/, ByteSizeUnit.BYTES)).build()).get();
         client().prepareIndex("test", "test", "0")
             .setSource("{}", XContentType.JSON).setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
         assertFalse(shard.shouldPeriodicallyFlush());
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index 10c76919966..08f0319d6cf 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -90,6 +90,7 @@ import org.elasticsearch.index.seqno.SequenceNumbers;
 import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
 import org.elasticsearch.index.store.Store;
 import org.elasticsearch.index.store.StoreStats;
+import org.elasticsearch.index.translog.TestTranslog;
 import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.index.translog.TranslogTests;
 import org.elasticsearch.indices.IndicesQueryCache;
@@ -519,6 +520,7 @@ public class IndexShardTests extends IndexShardTestCase {

         // promote the replica
         final ShardRouting replicaRouting = indexShard.routingEntry();
+        final long newPrimaryTerm = indexShard.getPrimaryTerm() + between(1, 10000);
         final ShardRouting primaryRouting =
             newShardRouting(
                 replicaRouting.shardId(),
@@ -527,7 +529,7 @@ public class IndexShardTests extends IndexShardTestCase {
                 true,
                 ShardRoutingState.STARTED,
                 replicaRouting.allocationId());
-        indexShard.updateShardState(primaryRouting, indexShard.getPrimaryTerm() + 1, (shard, listener) -> {},
+        indexShard.updateShardState(primaryRouting, newPrimaryTerm, (shard, listener) -> {},
             0L, Collections.singleton(primaryRouting.allocationId().getId()),
             new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build(),
             Collections.emptySet());
@@ -553,6 +555,7 @@ public class IndexShardTests extends IndexShardTestCase {
         latch.await();

         assertThat(indexShard.getTranslog().getGeneration().translogFileGeneration, equalTo(currentTranslogGeneration + 1));
+        assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm));

         closeShards(indexShard);
     }
@@ -571,7 +574,10 @@ public class IndexShardTests extends IndexShardTestCase {
         ShardRouting replicaRouting = indexShard.routingEntry();
         ShardRouting primaryRouting = newShardRouting(replicaRouting.shardId(), replicaRouting.currentNodeId(), null, true,
             ShardRoutingState.STARTED, replicaRouting.allocationId());
-        indexShard.updateShardState(primaryRouting, indexShard.getPrimaryTerm() + 1, (shard, listener) -> {}, 0L,
+        final long newPrimaryTerm = indexShard.getPrimaryTerm() + between(1, 1000);
+        indexShard.updateShardState(primaryRouting, newPrimaryTerm, (shard, listener) -> {
+                assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm));
+            }, 0L,
             Collections.singleton(indexShard.routingEntry().allocationId().getId()),
             new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build(),
             Collections.emptySet());
@@ -739,6 +745,7 @@ public class IndexShardTests extends IndexShardTestCase {
                 @Override
                 public void onResponse(Releasable releasable) {
                     assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm));
+                    assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm));
                     assertThat(indexShard.getLocalCheckpoint(), equalTo(expectedLocalCheckpoint));
                     assertThat(indexShard.getGlobalCheckpoint(), equalTo(newGlobalCheckPoint));
                     onResponse.set(true);
@@ -784,15 +791,18 @@ public class IndexShardTests extends IndexShardTestCase {
             assertFalse(onResponse.get());
             assertNull(onFailure.get());
             assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm));
+            assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(primaryTerm));
             Releasables.close(operation1);
             // our operation should still be blocked
             assertFalse(onResponse.get());
             assertNull(onFailure.get());
             assertThat(indexShard.getPrimaryTerm(), equalTo(primaryTerm));
+            assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(primaryTerm));
             Releasables.close(operation2);
             barrier.await();
             // now lock acquisition should have succeeded
             assertThat(indexShard.getPrimaryTerm(), equalTo(newPrimaryTerm));
+            assertThat(TestTranslog.getCurrentTerm(indexShard.getTranslog()), equalTo(newPrimaryTerm));
             if (engineClosed) {
                 assertFalse(onResponse.get());
                 assertThat(onFailure.get(), instanceOf(AlreadyClosedException.class));
@@ -1739,7 +1749,7 @@ public class IndexShardTests extends IndexShardTestCase {
         flushShard(shard);
         assertThat(getShardDocUIDs(shard), containsInAnyOrder("doc-0", "doc-1"));
         // Simulate resync (without rollback): Noop #1, index #2
-        shard.primaryTerm++;
+        acquireReplicaOperationPermitBlockingly(shard, shard.primaryTerm + 1);
         shard.markSeqNoAsNoop(1, "test");
         shard.applyIndexOperationOnReplica(2, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false,
             SourceToParse.source(indexName, "doc", "doc-2", new BytesArray("{}"), XContentType.JSON), mapping);
@@ -2052,18 +2062,18 @@ public class IndexShardTests extends IndexShardTestCase {
         IndexMetaData metaData = IndexMetaData.builder("test")
             .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}")
             .settings(settings)
-            .primaryTerm(0, 1).build();
+            .primaryTerm(0, randomLongBetween(1, Long.MAX_VALUE)).build();
         IndexShard primary = newShard(new ShardId(metaData.getIndex(), 0), true, "n1", metaData, null);
         List<Translog.Operation> operations = new ArrayList<>();
         int numTotalEntries = randomIntBetween(0, 10);
         int numCorruptEntries = 0;
         for (int i = 0; i < numTotalEntries; i++) {
             if (randomBoolean()) {
-                operations.add(new Translog.Index("test", "1", 0, 1, VersionType.INTERNAL,
+                operations.add(new Translog.Index("test", "1", 0, primary.getPrimaryTerm(), 1, VersionType.INTERNAL,
                     "{\"foo\" : \"bar\"}".getBytes(Charset.forName("UTF-8")), null, -1));
             } else {
                 // corrupt entry
-                operations.add(new Translog.Index("test", "2", 1, 1, VersionType.INTERNAL,
+                operations.add(new Translog.Index("test", "2", 1, primary.getPrimaryTerm(), 1, VersionType.INTERNAL,
                     "{\"foo\" : \"bar}".getBytes(Charset.forName("UTF-8")), null, -1));
                 numCorruptEntries++;
             }
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java
index ed60d38dfa4..91e439dcda9 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/IndexingOperationListenerTests.java
@@ -136,8 +136,8 @@ public class IndexingOperationListenerTests extends ESTestCase{
         IndexingOperationListener.CompositeListener compositeListener =
             new IndexingOperationListener.CompositeListener(indexingOperationListeners, logger);
         ParsedDocument doc = InternalEngineTests.createParsedDoc("1", null);
-        Engine.Delete delete = new Engine.Delete("test", "1", new Term("_id", Uid.encodeId(doc.id())));
-        Engine.Index index = new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), doc);
+        Engine.Delete delete = new Engine.Delete("test", "1", new Term("_id", Uid.encodeId(doc.id())), randomNonNegativeLong());
+        Engine.Index index = new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), randomNonNegativeLong(), doc);
         compositeListener.postDelete(randomShardId, delete, new Engine.DeleteResult(1, SequenceNumbers.UNASSIGNED_SEQ_NO, true));
         assertEquals(0, preIndex.get());
         assertEquals(0, postIndex.get());
diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java
index 1bd98cd1c9e..5803bf26363 100644
--- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java
+++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java
@@ -122,14 +122,15 @@ public class RefreshListenersTests extends ESTestCase {
             }
         };
         store.createEmpty();
+        final long primaryTerm = randomNonNegativeLong();
         final String translogUUID =
-            Translog.createEmptyTranslog(translogConfig.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
+            Translog.createEmptyTranslog(translogConfig.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm);
         store.associateIndexWithNewTranslog(translogUUID);
         EngineConfig config = new EngineConfig(shardId, allocationId, threadPool, indexSettings, null, store, newMergePolicy(),
             iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), eventListener, IndexSearcher.getDefaultQueryCache(),
             IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), Collections.singletonList(listeners),
             Collections.emptyList(), null,
-            (e, s) -> 0, new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED);
+            (e, s) -> 0, new NoneCircuitBreakerService(), () -> SequenceNumbers.NO_OPS_PERFORMED, () -> primaryTerm);
         engine = new InternalEngine(config);
         engine.recoverFromTranslog();
         listeners.setTranslog(engine.getTranslog());
@@ -363,7 +364,7 @@ public class RefreshListenersTests extends ESTestCase {
         BytesReference source = new BytesArray(new byte[] { 1 });
         ParsedDocument doc = new ParsedDocument(versionField, seqID, id, "test", null, Arrays.asList(document), source,
             XContentType.JSON, null);
-        Engine.Index index = new Engine.Index(new Term("_id", doc.id()), doc);
+        Engine.Index index = new Engine.Index(new Term("_id", doc.id()), engine.config().getPrimaryTermSupplier().getAsLong(), doc);
         return engine.index(index);
     }
diff --git a/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java b/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java
index 4077d033da9..7ab9fa67330 100644
--- a/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java
+++ b/server/src/test/java/org/elasticsearch/index/translog/TestTranslog.java
@@ -83,26 +83,7 @@ public class TestTranslog {
         int corruptions = RandomNumbers.randomIntBetween(random, 5, 20);
         for (int i = 0; i < corruptions; i++) {
             Path fileToCorrupt = RandomPicks.randomFrom(random, candidates);
-            try (FileChannel raf = FileChannel.open(fileToCorrupt, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
-                // read
-                raf.position(RandomNumbers.randomLongBetween(random, 0, raf.size() - 1));
-                long filePointer = raf.position();
-                ByteBuffer bb = ByteBuffer.wrap(new byte[1]);
-                raf.read(bb);
-                bb.flip();
-
-                // corrupt
-                byte oldValue = bb.get(0);
-                byte newValue = (byte) (oldValue + 1);
-                bb.put(0, newValue);
-
-                // rewrite
-                raf.position(filePointer);
-                raf.write(bb);
-                logger.info("--> corrupting file {} -- flipping at position {} from {} to {} file: {}",
-                    fileToCorrupt, filePointer, Integer.toHexString(oldValue),
-                    Integer.toHexString(newValue), fileToCorrupt);
-            }
+            corruptFile(logger, random, fileToCorrupt);
             corruptedFiles.add(fileToCorrupt);
         }
     }
@@ -110,6 +91,29 @@ public class TestTranslog {
         return corruptedFiles;
     }

+    static void corruptFile(Logger logger, Random random, Path fileToCorrupt) throws IOException {
+        try (FileChannel raf = FileChannel.open(fileToCorrupt, StandardOpenOption.READ, StandardOpenOption.WRITE)) {
+            // read
+            raf.position(RandomNumbers.randomLongBetween(random, 0, raf.size() - 1));
+            long filePointer = raf.position();
+            ByteBuffer bb = ByteBuffer.wrap(new byte[1]);
+            raf.read(bb);
+            bb.flip();
+
+            // corrupt
+            byte oldValue = bb.get(0);
+            byte newValue = (byte) (oldValue + 1);
+            bb.put(0, newValue);
+
+            // rewrite
+            raf.position(filePointer);
+            raf.write(bb);
+            logger.info("--> corrupting file {} -- flipping at position {} from {} to {} file: {}",
+                fileToCorrupt, filePointer, Integer.toHexString(oldValue),
+                Integer.toHexString(newValue), fileToCorrupt);
+        }
+    }
+
     /**
      * Lists all existing commits in a given index path, then read the minimum translog generation that will be used in recoverFromTranslog.
     */
@@ -122,4 +126,11 @@ public class TestTranslog {
             return Long.parseLong(recoveringCommit.getUserData().get(Translog.TRANSLOG_GENERATION_KEY));
         }
     }
+
+    /**
+     * Returns the primary term associated with the current translog writer of the given translog.
+     */
+    public static long getCurrentTerm(Translog translog) {
+        return translog.getCurrent().getPrimaryTerm();
+    }
 }
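
Extracting corruptFile from the random-candidate loop lets a test corrupt one specific file instead of a randomly chosen set, which the new TranslogHeaderTests below uses to damage a lone header file. A small reuse sketch, assuming the same package as TestTranslog:

```java
import java.io.IOException;
import java.nio.file.Path;
import java.util.Random;
import org.apache.logging.log4j.Logger;

final class CorruptionSketch {
    static void corruptNTimes(Logger logger, Random random, Path file, int times) throws IOException {
        for (int i = 0; i < times; i++) {
            // flips one byte at a random position; (byte) (oldValue + 1) wraps modulo 256,
            // so the written byte always differs from the original
            TestTranslog.corruptFile(logger, random, file);
        }
    }
}
```
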
diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java
index 2f6f4ee3178..9ae502fecb5 100644
--- a/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java
+++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java
@@ -171,7 +171,7 @@ public class TranslogDeletionPolicyTests extends ESTestCase {
             }
             writer = TranslogWriter.create(new ShardId("index", "uuid", 0), translogUUID, gen,
                 tempDir.resolve(Translog.getFilename(gen)), FileChannel::open, TranslogConfig.DEFAULT_BUFFER_SIZE, 1L, 1L, () -> 1L,
-                () -> 1L);
+                () -> 1L, randomNonNegativeLong());
             writer = Mockito.spy(writer);
             Mockito.doReturn(now - (numberOfReaders - gen + 1) * 1000).when(writer).getLastModifiedTime();
diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java
new file mode 100644
index 00000000000..0dc404767de
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogHeaderTests.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog;
+
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.store.OutputStreamDataOutput;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
+import org.elasticsearch.index.seqno.SequenceNumbers;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.nio.channels.Channels;
+import java.nio.channels.FileChannel;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.StandardOpenOption;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.lessThan;
+
+public class TranslogHeaderTests extends ESTestCase {
+
+    public void testCurrentHeaderVersion() throws Exception {
+        final String translogUUID = UUIDs.randomBase64UUID();
+        final TranslogHeader outHeader = new TranslogHeader(translogUUID, randomNonNegativeLong());
+        final long generation = randomNonNegativeLong();
+        final Path translogFile = createTempDir().resolve(Translog.getFilename(generation));
+        try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) {
+            outHeader.write(channel);
+            assertThat(outHeader.sizeInBytes(), equalTo((int)channel.position()));
+        }
+        try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) {
+            final TranslogHeader inHeader = TranslogHeader.read(translogUUID, translogFile, channel);
+            assertThat(inHeader.getTranslogUUID(), equalTo(translogUUID));
+            assertThat(inHeader.getPrimaryTerm(), equalTo(outHeader.getPrimaryTerm()));
+            assertThat(inHeader.sizeInBytes(), equalTo((int)channel.position()));
+        }
+        final TranslogCorruptedException mismatchUUID = expectThrows(TranslogCorruptedException.class, () -> {
+            try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) {
+                TranslogHeader.read(UUIDs.randomBase64UUID(), translogFile, channel);
+            }
+        });
+        assertThat(mismatchUUID.getMessage(), containsString("this translog file belongs to a different translog"));
+        int corruptions = between(1, 10);
+        for (int i = 0; i < corruptions; i++) {
+            TestTranslog.corruptFile(logger, random(), translogFile);
+        }
+        expectThrows(TranslogCorruptedException.class, () -> {
+            try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) {
+                TranslogHeader.read(outHeader.getTranslogUUID(), translogFile, channel);
+            }
+        });
+    }
+
+    public void testHeaderWithoutPrimaryTerm() throws Exception {
+        final String translogUUID = UUIDs.randomBase64UUID();
+        final long generation = randomNonNegativeLong();
+        final Path translogFile = createTempDir().resolve(Translog.getFilename(generation));
+        try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) {
+            writeHeaderWithoutTerm(channel, translogUUID);
+            assertThat((int)channel.position(), lessThan(TranslogHeader.headerSizeInBytes(translogUUID)));
+        }
+        try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) {
+            final TranslogHeader inHeader = TranslogHeader.read(translogUUID, translogFile, channel);
+            assertThat(inHeader.getTranslogUUID(), equalTo(translogUUID));
+            assertThat(inHeader.getPrimaryTerm(), equalTo(TranslogHeader.UNKNOWN_PRIMARY_TERM));
+            assertThat(inHeader.sizeInBytes(), equalTo((int)channel.position()));
+        }
+        expectThrows(TranslogCorruptedException.class, () -> {
+            try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) {
+                TranslogHeader.read(UUIDs.randomBase64UUID(), translogFile, channel);
+            }
+        });
+    }
+
+    static void writeHeaderWithoutTerm(FileChannel channel, String translogUUID) throws IOException {
+        final OutputStreamStreamOutput out = new OutputStreamStreamOutput(Channels.newOutputStream(channel));
+        CodecUtil.writeHeader(new OutputStreamDataOutput(out), TranslogHeader.TRANSLOG_CODEC, TranslogHeader.VERSION_CHECKPOINTS);
+        final BytesRef uuid = new BytesRef(translogUUID);
+        out.writeInt(uuid.length);
+        out.writeBytes(uuid.bytes, uuid.offset, uuid.length);
+        channel.force(true);
+        assertThat(channel.position(), equalTo(43L));
+    }
+
+    public void testLegacyTranslogVersions() throws Exception {
+        checkFailsToOpen("/org/elasticsearch/index/translog/translog-v0.binary", IllegalStateException.class, "pre-1.4 translog");
+        checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1.binary", IllegalStateException.class, "pre-2.0 translog");
+        checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-truncated.binary", IllegalStateException.class, "pre-2.0 translog");
+        checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-corrupted-magic.binary",
+            TranslogCorruptedException.class, "translog looks like version 1 or later, but has corrupted header");
+        checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-corrupted-body.binary",
+            IllegalStateException.class, "pre-2.0 translog");
+    }
+
+    private <E extends Exception> void checkFailsToOpen(String file, Class<E> expectedErrorType, String expectedMessage) {
+        final Path translogFile = getDataPath(file);
+        assertThat("test file [" + translogFile + "] should exist", Files.exists(translogFile), equalTo(true));
+        final E error = expectThrows(expectedErrorType, () -> {
+            final Checkpoint checkpoint = new Checkpoint(Files.size(translogFile), 1, 1,
+                SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED, SequenceNumbers.NO_OPS_PERFORMED, 1);
+            try (FileChannel channel = FileChannel.open(translogFile, StandardOpenOption.READ)) {
+                TranslogReader.open(channel, translogFile, checkpoint, null);
+            }
+        });
+        assertThat(error.getMessage(), containsString(expectedMessage));
+    }
+}
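
The magic 43L asserted in writeHeaderWithoutTerm above is just the size of a version-2 (term-less) header. The worked check below assumes UUIDs.randomBase64UUID() always yields 22 ASCII characters, which holds for its 16 random bytes base64-encoded without padding:

```java
// Lucene's CodecUtil.writeHeader emits: 4-byte magic, a length-prefixed codec
// name ("translog" -> 1 + 8 bytes), and a 4-byte version.
public class HeaderSizeArithmetic {
    public static void main(String[] args) {
        int codecHeader = 4 + (1 + "translog".length()) + 4; // = 17
        int uuidPart = 4 /* uuid length int */ + 22 /* uuid bytes */; // = 26
        System.out.println(codecHeader + uuidPart); // 43, matching equalTo(43L)
    }
}
```

A header that also carries a primary term is strictly larger, which is what the lessThan(TranslogHeader.headerSizeInBytes(translogUUID)) assertion exploits.
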
diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
index faff5400282..b3b9fca886e 100644
--- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
+++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java
@@ -109,6 +109,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.stream.Collectors;
 import java.util.stream.LongStream;
+import java.util.stream.Stream;

 import static org.elasticsearch.common.util.BigArrays.NON_RECYCLING_INSTANCE;
 import static org.elasticsearch.index.translog.SnapshotMatchers.containsOperationsInAnyOrder;
@@ -132,6 +133,8 @@ public class TranslogTests extends ESTestCase {
     protected Translog translog;
     private AtomicLong globalCheckpoint;
     protected Path translogDir;
+    // A default primary term is used by translog instances created in this test.
+    private final AtomicLong primaryTerm = new AtomicLong();

     @Override
     protected void afterIfSuccessful() throws Exception {
@@ -152,14 +155,14 @@ public class TranslogTests extends ESTestCase {

     protected Translog createTranslog(TranslogConfig config) throws IOException {
         String translogUUID =
-            Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId);
+            Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
         return new Translog(config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()),
-            () -> SequenceNumbers.NO_OPS_PERFORMED);
+            () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
     }

     protected Translog openTranslog(TranslogConfig config, String translogUUID) throws IOException {
         return new Translog(config, translogUUID, createTranslogDeletionPolicy(config.getIndexSettings()),
-            () -> SequenceNumbers.NO_OPS_PERFORMED);
+            () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
     }
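
Every Translog now takes a primary-term supplier next to its global-checkpoint supplier. A minimal wiring sketch matching the signatures in the two hunks above; the static import mirrors what this test class appears to use and, like the ShardId value, is an assumption from outside this excerpt:

```java
import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.ShardId;
import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy;

final class TranslogWiringSketch {
    static Translog openWithTerm(TranslogConfig config, AtomicLong primaryTerm) throws IOException {
        final String uuid = Translog.createEmptyTranslog(
            config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, new ShardId("index", "_na_", 0), primaryTerm.get());
        // The supplier is captured, not the value: bumping the AtomicLong means the next
        // rolled generation is stamped with the new term, which is what IndexShardTests
        // checks earlier via TestTranslog.getCurrentTerm.
        return new Translog(config, uuid, createTranslogDeletionPolicy(config.getIndexSettings()),
            () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get);
    }
}
```
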
snapshot = translog.newSnapshot()) { @@ -333,7 +337,7 @@ public class TranslogTests extends ESTestCase { Translog.NoOp noOp = (Translog.NoOp) snapshot.next(); assertNotNull(noOp); assertThat(noOp.seqNo(), equalTo(seqNo)); - assertThat(noOp.primaryTerm(), equalTo(primaryTerm)); + assertThat(noOp.primaryTerm(), equalTo(noopTerm)); assertThat(noOp.reason(), equalTo(reason)); assertNull(snapshot.next()); @@ -401,35 +405,35 @@ public class TranslogTests extends ESTestCase { final TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(0)); } - assertThat((int) firstOperationPosition, greaterThan(CodecUtil.headerLength(TranslogWriter.TRANSLOG_CODEC))); - translog.add(new Translog.Index("test", "1", 0, new byte[]{1})); + assertThat((int) firstOperationPosition, greaterThan(CodecUtil.headerLength(TranslogHeader.TRANSLOG_CODEC))); + translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1})); { final TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(1)); - assertThat(stats.getTranslogSizeInBytes(), equalTo(139L)); + assertThat(stats.getTranslogSizeInBytes(), equalTo(163L)); assertThat(stats.getUncommittedOperations(), equalTo(1)); - assertThat(stats.getUncommittedSizeInBytes(), equalTo(139L)); + assertThat(stats.getUncommittedSizeInBytes(), equalTo(163L)); assertThat(stats.getEarliestLastModifiedAge(), greaterThan(1L)); } - translog.add(new Translog.Delete("test", "2", 1, newUid("2"))); + translog.add(new Translog.Delete("test", "2", 1, primaryTerm.get(), newUid("2"))); { final TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(2)); - assertThat(stats.getTranslogSizeInBytes(), equalTo(188L)); + assertThat(stats.getTranslogSizeInBytes(), equalTo(212L)); assertThat(stats.getUncommittedOperations(), equalTo(2)); - assertThat(stats.getUncommittedSizeInBytes(), equalTo(188L)); + assertThat(stats.getUncommittedSizeInBytes(), equalTo(212L)); assertThat(stats.getEarliestLastModifiedAge(), greaterThan(1L)); } - translog.add(new Translog.Delete("test", "3", 2, newUid("3"))); + translog.add(new Translog.Delete("test", "3", 2, primaryTerm.get(), newUid("3"))); { final TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(3)); - assertThat(stats.getTranslogSizeInBytes(), equalTo(237L)); + assertThat(stats.getTranslogSizeInBytes(), equalTo(261L)); assertThat(stats.getUncommittedOperations(), equalTo(3)); - assertThat(stats.getUncommittedSizeInBytes(), equalTo(237L)); + assertThat(stats.getUncommittedSizeInBytes(), equalTo(261L)); assertThat(stats.getEarliestLastModifiedAge(), greaterThan(1L)); } @@ -437,13 +441,13 @@ public class TranslogTests extends ESTestCase { { final TranslogStats stats = stats(); assertThat(stats.estimatedNumberOfOperations(), equalTo(4)); - assertThat(stats.getTranslogSizeInBytes(), equalTo(279L)); + assertThat(stats.getTranslogSizeInBytes(), equalTo(303L)); assertThat(stats.getUncommittedOperations(), equalTo(4)); - assertThat(stats.getUncommittedSizeInBytes(), equalTo(279L)); + assertThat(stats.getUncommittedSizeInBytes(), equalTo(303L)); assertThat(stats.getEarliestLastModifiedAge(), greaterThan(1L)); } - final long expectedSizeInBytes = 322L; + final long expectedSizeInBytes = 358L; translog.rollGeneration(); { final TranslogStats stats = stats(); @@ -494,7 +498,7 @@ public class TranslogTests extends ESTestCase { int uncommittedOps = 0; int operationsInLastGen = 0; for (int i = 0; i < operations; i++) { - translog.add(new 
Translog.Index("test", Integer.toString(i), i, new byte[]{1})); + translog.add(new Translog.Index("test", Integer.toString(i), i, primaryTerm.get(), new byte[]{1})); uncommittedOps++; operationsInLastGen++; if (rarely()) { @@ -563,7 +567,7 @@ public class TranslogTests extends ESTestCase { assertThat(snapshot, SnapshotMatchers.size(0)); } - addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, new byte[]{1})); + addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1})); try (Translog.Snapshot snapshot = translog.newSnapshot()) { assertThat(snapshot, SnapshotMatchers.equalsTo(ops)); @@ -583,9 +587,9 @@ public class TranslogTests extends ESTestCase { public void testReadLocation() throws IOException { ArrayList ops = new ArrayList<>(); ArrayList locs = new ArrayList<>(); - locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, new byte[]{1}))); - locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "2", 1, new byte[]{1}))); - locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "3", 2, new byte[]{1}))); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1}))); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "2", 1, primaryTerm.get(), new byte[]{1}))); + locs.add(addToTranslogAndList(translog, ops, new Translog.Index("test", "3", 2, primaryTerm.get(), new byte[]{1}))); int i = 0; for (Translog.Operation op : ops) { assertEquals(op, translog.readOperation(locs.get(i++))); @@ -601,16 +605,16 @@ public class TranslogTests extends ESTestCase { toClose.add(snapshot); assertThat(snapshot, SnapshotMatchers.size(0)); - addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, new byte[]{1})); + addToTranslogAndList(translog, ops, new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1})); Translog.Snapshot snapshot1 = translog.newSnapshot(); toClose.add(snapshot1); - addToTranslogAndList(translog, ops, new Translog.Index("test", "2", 1, new byte[]{2})); + addToTranslogAndList(translog, ops, new Translog.Index("test", "2", 1, primaryTerm.get(), new byte[]{2})); assertThat(snapshot1, SnapshotMatchers.equalsTo(ops.get(0))); translog.rollGeneration(); - addToTranslogAndList(translog, ops, new Translog.Index("test", "3", 2, new byte[]{3})); + addToTranslogAndList(translog, ops, new Translog.Index("test", "3", 2, primaryTerm.get(), new byte[]{3})); Translog.Snapshot snapshot2 = translog.newSnapshot(); toClose.add(snapshot2); @@ -624,7 +628,7 @@ public class TranslogTests extends ESTestCase { public void testSnapshotOnClosedTranslog() throws IOException { assertTrue(Files.exists(translogDir.resolve(Translog.getFilename(1)))); - translog.add(new Translog.Index("test", "1", 0, new byte[]{1})); + translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1})); translog.close(); try { Translog.Snapshot snapshot = translog.newSnapshot(); @@ -747,7 +751,7 @@ public class TranslogTests extends ESTestCase { int translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations; op++) { String ascii = randomAlphaOfLengthBetween(1, 50); - locations.add(translog.add(new Translog.Index("test", "" + op, op, ascii.getBytes("UTF-8")))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8")))); } translog.sync(); @@ -774,7 +778,7 @@ public class TranslogTests extends ESTestCase { int 
translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations; op++) { String ascii = randomAlphaOfLengthBetween(1, 50); - locations.add(translog.add(new Translog.Index("test", "" + op, op, ascii.getBytes("UTF-8")))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), ascii.getBytes("UTF-8")))); } translog.sync(); @@ -838,7 +842,7 @@ public class TranslogTests extends ESTestCase { public void testVerifyTranslogIsNotDeleted() throws IOException { assertFileIsPresent(translog, 1); - translog.add(new Translog.Index("test", "1", 0, new byte[]{1})); + translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1})); try (Translog.Snapshot snapshot = translog.newSnapshot()) { assertThat(snapshot, SnapshotMatchers.size(1)); assertFileIsPresent(translog, 1); @@ -890,10 +894,10 @@ public class TranslogTests extends ESTestCase { switch (type) { case CREATE: case INDEX: - op = new Translog.Index("type", "" + id, id, new byte[]{(byte) id}); + op = new Translog.Index("type", "" + id, id, primaryTerm.get(), new byte[]{(byte) id}); break; case DELETE: - op = new Translog.Delete("test", Long.toString(id), id, newUid(Long.toString(id))); + op = new Translog.Delete("test", Long.toString(id), id, primaryTerm.get(), newUid(Long.toString(id))); break; case NO_OP: op = new Translog.NoOp(id, 1, Long.toString(id)); @@ -1052,13 +1056,13 @@ public class TranslogTests extends ESTestCase { for (int op = 0; op < translogOperations; op++) { int seqNo = ++count; final Translog.Location location = - translog.add(new Translog.Index("test", "" + op, seqNo, Integer.toString(seqNo).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8")))); if (randomBoolean()) { assertTrue("at least one operation pending", translog.syncNeeded()); assertTrue("this operation has not been synced", translog.ensureSynced(location)); assertFalse("the last call to ensureSycned synced all previous ops", translog.syncNeeded()); // we are the last location so everything should be synced seqNo = ++count; - translog.add(new Translog.Index("test", "" + op, seqNo, Integer.toString(seqNo).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, seqNo, primaryTerm.get(), Integer.toString(seqNo).getBytes(Charset.forName("UTF-8")))); assertTrue("one pending operation", translog.syncNeeded()); assertFalse("this op has been synced before", translog.ensureSynced(location)); // not syncing now assertTrue("we only synced a previous operation yet", translog.syncNeeded()); @@ -1087,7 +1091,7 @@ public class TranslogTests extends ESTestCase { rollAndCommit(translog); // do this first so that there is at least one pending tlog entry } final Translog.Location location = - translog.add(new Translog.Index("test", "" + op, op, Integer.toString(++count).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(++count).getBytes(Charset.forName("UTF-8")))); locations.add(location); } Collections.shuffle(locations, random()); @@ -1115,7 +1119,7 @@ public class TranslogTests extends ESTestCase { int count = 0; for (int op = 0; op < translogOperations; op++) { locations.add( - translog.add(new Translog.Index("test", "" + op, op, Integer.toString(++count).getBytes(Charset.forName("UTF-8"))))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), 
Integer.toString(++count).getBytes(Charset.forName("UTF-8"))))); if (rarely() && translogOperations > op + 1) { rollAndCommit(translog); } @@ -1152,7 +1156,7 @@ public class TranslogTests extends ESTestCase { int lastSynced = -1; long lastSyncedGlobalCheckpoint = globalCheckpoint.get(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (randomBoolean()) { globalCheckpoint.set(globalCheckpoint.get() + randomIntBetween(1, 16)); } @@ -1163,8 +1167,8 @@ public class TranslogTests extends ESTestCase { } } assertEquals(translogOperations, translog.totalOperations()); - translog.add(new Translog.Index( - "test", "" + translogOperations, translogOperations, Integer.toString(translogOperations).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + translogOperations, translogOperations, primaryTerm.get(), + Integer.toString(translogOperations).getBytes(Charset.forName("UTF-8")))); final Checkpoint checkpoint = Checkpoint.read(translog.location().resolve(Translog.CHECKPOINT_FILE_NAME)); try (TranslogReader reader = translog.openReader(translog.location().resolve(Translog.getFilename(translog.currentFileGeneration())), checkpoint)) { @@ -1288,7 +1292,7 @@ public class TranslogTests extends ESTestCase { int minUncommittedOp = -1; final boolean commitOften = randomBoolean(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); final boolean commit = commitOften ? 
frequently() : rarely(); if (commit && op < translogOperations - 1) { rollAndCommit(translog); @@ -1309,7 +1313,7 @@ public class TranslogTests extends ESTestCase { assertNull(snapshot.next()); } } else { - translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED); + translog = new Translog(config, translogGeneration.translogUUID, translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); try (Translog.Snapshot snapshot = translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) { @@ -1331,7 +1335,7 @@ public class TranslogTests extends ESTestCase { Translog.TranslogGeneration translogGeneration = null; final boolean sync = randomBoolean(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (op == prepareOp) { translogGeneration = translog.getGeneration(); translog.rollGeneration(); @@ -1348,7 +1352,7 @@ public class TranslogTests extends ESTestCase { TranslogConfig config = translog.getConfig(); final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); @@ -1362,7 +1366,7 @@ public class TranslogTests extends ESTestCase { } } if (randomBoolean()) { // recover twice - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration()); @@ -1387,7 +1391,7 @@ public class TranslogTests extends ESTestCase { Translog.TranslogGeneration translogGeneration = null; final boolean sync = randomBoolean(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (op == prepareOp) { translogGeneration = translog.getGeneration(); translog.rollGeneration(); @@ -1408,7 +1412,7 @@ public class TranslogTests extends ESTestCase { final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog translog = new 
Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); assertFalse(translog.syncNeeded()); @@ -1423,7 +1427,7 @@ public class TranslogTests extends ESTestCase { } if (randomBoolean()) { // recover twice - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 3 less than current - we never finished the commit and run recovery twice", translogGeneration.translogFileGeneration + 3, translog.currentFileGeneration()); @@ -1447,7 +1451,7 @@ public class TranslogTests extends ESTestCase { Translog.TranslogGeneration translogGeneration = null; final boolean sync = randomBoolean(); for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (op == prepareOp) { translogGeneration = translog.getGeneration(); translog.rollGeneration(); @@ -1466,15 +1470,15 @@ public class TranslogTests extends ESTestCase { Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), corrupted, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog ignored = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { fail("corrupted"); } catch (IllegalStateException ex) { - assertEquals("Checkpoint file translog-3.ckp already exists but has corrupted content expected: Checkpoint{offset=3068, " + + assertEquals("Checkpoint file translog-3.ckp already exists but has corrupted content expected: Checkpoint{offset=3080, " + "numOps=55, generation=3, minSeqNo=45, maxSeqNo=99, globalCheckpoint=-1, minTranslogGeneration=1} but got: Checkpoint{offset=0, numOps=0, " + "generation=0, minSeqNo=-1, maxSeqNo=-1, globalCheckpoint=-1, minTranslogGeneration=0}", ex.getMessage()); } Checkpoint.write(FileChannel::open, config.getTranslogPath().resolve(Translog.getCommitCheckpointFileName(read.generation)), read, StandardOpenOption.WRITE, StandardOpenOption.TRUNCATE_EXISTING); - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertNotNull(translogGeneration); assertEquals("lastCommitted must be 2 less than current - we never finished the commit", translogGeneration.translogFileGeneration + 2, translog.currentFileGeneration()); 
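Taken together, the hunks in this test file apply one mechanical change: a translog now learns the shard's primary term when it is created and pulls the current term from a supplier afterwards. A minimal sketch of the resulting construction pattern, distilled from the call sites in these hunks (the helper class and its parameters are ours, not part of the patch):

```java
import java.io.IOException;
import java.nio.file.Path;
import java.util.concurrent.atomic.AtomicLong;

import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.index.translog.TranslogConfig;
import org.elasticsearch.index.translog.TranslogDeletionPolicy;

// Sketch only: mirrors the calls this patch threads through the tests.
class TranslogWithTermSketch {
    static Translog open(TranslogConfig config, Path translogPath, ShardId shardId,
                         TranslogDeletionPolicy deletionPolicy, AtomicLong primaryTerm) throws IOException {
        // The term at creation time is baked into the initial generation...
        String translogUUID = Translog.createEmptyTranslog(
                translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get());
        // ...and a supplier hands the current term to every generation rolled later.
        return new Translog(config, translogUUID, deletionPolicy,
                () -> SequenceNumbers.NO_OPS_PERFORMED,  // global checkpoint supplier
                primaryTerm::get);                       // primary term supplier
    }
}
```

Passing `primaryTerm::get` rather than a fixed value lets each rolled generation stamp whatever term the shard holds at that moment, which is exactly what the rolling-generation test further below asserts against the readers' stored terms.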
assertFalse(translog.syncNeeded()); @@ -1494,7 +1498,7 @@ public class TranslogTests extends ESTestCase { List ops = new ArrayList<>(); int translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations; op++) { - Translog.Index test = new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))); + Translog.Index test = new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))); ops.add(test); } Translog.writeOperations(out, ops); @@ -1509,8 +1513,8 @@ public class TranslogTests extends ESTestCase { int translogOperations = randomIntBetween(10, 100); try (Translog translog2 = create(createTempDir())) { for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); - locations2.add(translog2.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations2.add(translog2.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); } int iters = randomIntBetween(10, 100); for (int i = 0; i < iters; i++) { @@ -1536,7 +1540,7 @@ public class TranslogTests extends ESTestCase { int translogOperations = randomIntBetween(1, 10); int firstUncommitted = 0; for (int op = 0; op < translogOperations; op++) { - locations.add(translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); + locations.add(translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8"))))); if (randomBoolean()) { rollAndCommit(translog); firstUncommitted = op + 1; @@ -1551,12 +1555,12 @@ public class TranslogTests extends ESTestCase { final String foreignTranslog = randomRealisticUnicodeOfCodepointLengthBetween(1, translogGeneration.translogUUID.length()); try { - new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED); + new Translog(config, foreignTranslog, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); fail("translog doesn't belong to this UUID"); } catch (TranslogCorruptedException ex) { } - this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED); + this.translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); try (Translog.Snapshot snapshot = this.translog.newSnapshotFromGen(translogGeneration.translogFileGeneration)) { for (int i = firstUncommitted; i < translogOperations; i++) { Translog.Operation next = snapshot.next(); @@ -1568,10 +1572,10 @@ public class TranslogTests extends ESTestCase { } public void testFailOnClosedWrite() throws IOException { - translog.add(new Translog.Index("test", "1", 0, Integer.toString(1).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8")))); translog.close(); try { - translog.add(new Translog.Index("test", "1", 0, Integer.toString(1).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), 
Integer.toString(1).getBytes(Charset.forName("UTF-8")))); fail("closed"); } catch (AlreadyClosedException ex) { // all is well @@ -1609,7 +1613,7 @@ public class TranslogTests extends ESTestCase { } } - private static class TranslogThread extends Thread { + private class TranslogThread extends Thread { private final CountDownLatch downLatch; private final int opsPerThread; private final int threadId; @@ -1640,19 +1644,19 @@ public class TranslogTests extends ESTestCase { case CREATE: case INDEX: op = new Translog.Index("test", threadId + "_" + opCount, seqNoGenerator.getAndIncrement(), - randomUnicodeOfLengthBetween(1, 20 * 1024).getBytes("UTF-8")); + primaryTerm.get(), randomUnicodeOfLengthBetween(1, 20 * 1024).getBytes("UTF-8")); break; case DELETE: op = new Translog.Delete( "test", threadId + "_" + opCount, new Term("_uid", threadId + "_" + opCount), seqNoGenerator.getAndIncrement(), - 0, + primaryTerm.get(), 1 + randomInt(100000), randomFrom(VersionType.values())); break; case NO_OP: - op = new Translog.NoOp(seqNoGenerator.getAndIncrement(), randomNonNegativeLong(), randomAlphaOfLength(16)); + op = new Translog.NoOp(seqNoGenerator.getAndIncrement(), primaryTerm.get(), randomAlphaOfLength(16)); break; default: throw new AssertionError("unsupported operation type [" + type + "]"); @@ -1690,7 +1694,7 @@ public class TranslogTests extends ESTestCase { while (failed == false) { try { locations.add(translog.add( - new Translog.Index("test", "" + opsSynced, opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); + new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(), Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); translog.sync(); opsSynced++; } catch (MockDirectoryWrapper.FakeIOException ex) { @@ -1711,7 +1715,7 @@ public class TranslogTests extends ESTestCase { if (randomBoolean()) { try { locations.add(translog.add( - new Translog.Index("test", "" + opsSynced, opsSynced, Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); + new Translog.Index("test", "" + opsSynced, opsSynced, primaryTerm.get(), Integer.toString(opsSynced).getBytes(Charset.forName("UTF-8"))))); fail("we are already closed"); } catch (AlreadyClosedException ex) { assertNotNull(ex.getCause()); @@ -1745,7 +1749,7 @@ public class TranslogTests extends ESTestCase { translog.close(); // we are closed final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = translog.getDeletionPolicy(); - try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertEquals("lastCommitted must be 1 less than current", translogGeneration.translogFileGeneration + 1, tlog.currentFileGeneration()); assertFalse(tlog.syncNeeded()); @@ -1768,7 +1772,7 @@ public class TranslogTests extends ESTestCase { LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer borders regularly for (int opsAdded = 0; opsAdded < numOps; opsAdded++) { locations.add(translog.add( - new Translog.Index("test", "" + opsAdded, opsAdded, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))))); + new Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8"))))); try (Translog.Snapshot snapshot = this.translog.newSnapshot()) { 
assertEquals(opsAdded + 1, snapshot.totalOperations()); for (int i = 0; i < opsAdded; i++) { @@ -1787,11 +1791,11 @@ public class TranslogTests extends ESTestCase { TranslogConfig config = getTranslogConfig(tempDir); Translog translog = getFailableTranslog(fail, config, false, true, null, createTranslogDeletionPolicy()); LineFileDocs lineFileDocs = new LineFileDocs(random()); // writes pretty big docs so we cross buffer boarders regularly - translog.add(new Translog.Index("test", "1", 0, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); fail.failAlways(); try { Translog.Location location = translog.add( - new Translog.Index("test", "2", 1, lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); + new Translog.Index("test", "2", 1, primaryTerm.get(), lineFileDocs.nextDoc().toString().getBytes(Charset.forName("UTF-8")))); if (randomBoolean()) { translog.ensureSynced(location); } else { @@ -1881,7 +1885,7 @@ public class TranslogTests extends ESTestCase { } } try (Translog tlog = - new Translog(config, translogUUID, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED); + new Translog(config, translogUUID, createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); Translog.Snapshot snapshot = tlog.newSnapshot()) { if (writtenOperations.size() != snapshot.totalOperations()) { for (int i = 0; i < threadCount; i++) { @@ -1908,7 +1912,7 @@ public class TranslogTests extends ESTestCase { public void testRecoveryFromAFutureGenerationCleansUp() throws IOException { int translogOperations = randomIntBetween(10, 100); for (int op = 0; op < translogOperations / 2; op++) { - translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))); if (rarely()) { translog.rollGeneration(); } @@ -1916,7 +1920,7 @@ public class TranslogTests extends ESTestCase { translog.rollGeneration(); long comittedGeneration = randomLongBetween(2, translog.currentFileGeneration()); for (int op = translogOperations / 2; op < translogOperations; op++) { - translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))); if (rarely()) { translog.rollGeneration(); } @@ -1927,7 +1931,7 @@ public class TranslogTests extends ESTestCase { final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1); deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE)); deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); - translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED); + translog = new Translog(config, translog.getTranslogUUID(), deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); assertThat(translog.getMinFileGeneration(), equalTo(1L)); // no trimming done yet, just recovered for (long gen = 1; gen < translog.currentFileGeneration(); gen++) { @@ -1958,7 +1962,7 @@ public class TranslogTests extends ESTestCase { translogUUID = translog.getTranslogUUID(); int translogOperations = 
randomIntBetween(10, 100); for (int op = 0; op < translogOperations / 2; op++) { - translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))); if (rarely()) { translog.rollGeneration(); } @@ -1966,7 +1970,7 @@ public class TranslogTests extends ESTestCase { translog.rollGeneration(); comittedGeneration = randomLongBetween(2, translog.currentFileGeneration()); for (int op = translogOperations / 2; op < translogOperations; op++) { - translog.add(new Translog.Index("test", "" + op, op, Integer.toString(op).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + op, op, primaryTerm.get(), Integer.toString(op).getBytes(Charset.forName("UTF-8")))); if (rarely()) { translog.rollGeneration(); } @@ -1983,7 +1987,7 @@ public class TranslogTests extends ESTestCase { final TranslogDeletionPolicy deletionPolicy = new TranslogDeletionPolicy(-1, -1); deletionPolicy.setTranslogGenerationOfLastCommit(randomLongBetween(comittedGeneration, Long.MAX_VALUE)); deletionPolicy.setMinTranslogGenerationForRecovery(comittedGeneration); - try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { // we don't know when things broke exactly assertThat(translog.getMinFileGeneration(), greaterThanOrEqualTo(1L)); assertThat(translog.getMinFileGeneration(), lessThanOrEqualTo(comittedGeneration)); @@ -2047,9 +2051,9 @@ public class TranslogTests extends ESTestCase { }; if (translogUUID == null) { translogUUID = Translog.createEmptyTranslog( - config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, channelFactory); + config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, channelFactory, primaryTerm.get()); } - return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED) { + return new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get) { @Override ChannelFactory getChannelFactory() { return channelFactory; @@ -2157,10 +2161,10 @@ public class TranslogTests extends ESTestCase { Path tempDir = createTempDir(); TranslogConfig config = getTranslogConfig(tempDir); Translog translog = createTranslog(config); - translog.add(new Translog.Index("test", "boom", 0, "boom".getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "boom", 0, primaryTerm.get(), "boom".getBytes(Charset.forName("UTF-8")))); translog.close(); try { - new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED) { + new Translog(config, translog.getTranslogUUID(), createTranslogDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get) { @Override protected TranslogWriter createWriter(long fileGeneration, long initialMinTranslogGen, long initialGlobalCheckpoint) throws IOException { @@ -2175,7 +2179,7 @@ public class TranslogTests extends ESTestCase { } public void testRecoverWithUnbackedNextGen() throws IOException { - translog.add(new Translog.Index("test", "" + 0, 0, Integer.toString(1).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), 
Integer.toString(1).getBytes(Charset.forName("UTF-8")))); translog.close(); TranslogConfig config = translog.getConfig(); @@ -2191,7 +2195,7 @@ public class TranslogTests extends ESTestCase { assertNotNull("operation 1 must be non-null", op); assertEquals("payload mismatch for operation 1", 1, Integer.parseInt(op.getSource().source.utf8ToString())); - tlog.add(new Translog.Index("test", "" + 1, 1, Integer.toString(2).getBytes(Charset.forName("UTF-8")))); + tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(), Integer.toString(2).getBytes(Charset.forName("UTF-8")))); } try (Translog tlog = openTranslog(config, translog.getTranslogUUID()); @@ -2209,7 +2213,7 @@ public class TranslogTests extends ESTestCase { } public void testRecoverWithUnbackedNextGenInIllegalState() throws IOException { - translog.add(new Translog.Index("test", "" + 0, 0, Integer.toString(0).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), Integer.toString(0).getBytes(Charset.forName("UTF-8")))); translog.close(); TranslogConfig config = translog.getConfig(); Path ckp = config.getTranslogPath().resolve(Translog.CHECKPOINT_FILE_NAME); @@ -2218,7 +2222,7 @@ public class TranslogTests extends ESTestCase { Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog")); try { - Translog tlog = new Translog(config, translog.getTranslogUUID(), translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED); + Translog tlog = new Translog(config, translog.getTranslogUUID(), translog.getDeletionPolicy(), () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); fail("file already exists?"); } catch (TranslogException ex) { // all is well @@ -2228,7 +2232,7 @@ public class TranslogTests extends ESTestCase { } public void testRecoverWithUnbackedNextGenAndFutureFile() throws IOException { - translog.add(new Translog.Index("test", "" + 0, 0, Integer.toString(0).getBytes(Charset.forName("UTF-8")))); + translog.add(new Translog.Index("test", "" + 0, 0, primaryTerm.get(), Integer.toString(0).getBytes(Charset.forName("UTF-8")))); translog.close(); TranslogConfig config = translog.getConfig(); final String translogUUID = translog.getTranslogUUID(); @@ -2240,7 +2244,7 @@ public class TranslogTests extends ESTestCase { Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 1) + ".tlog")); // we add N+1 and N+2 to ensure we only delete the N+1 file and never jump ahead and wipe without the right condition Files.createFile(config.getTranslogPath().resolve("translog-" + (read.generation + 2) + ".tlog")); - try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED)) { + try (Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get)) { assertFalse(tlog.syncNeeded()); try (Translog.Snapshot snapshot = tlog.newSnapshot()) { for (int i = 0; i < 1; i++) { @@ -2249,11 +2253,11 @@ public class TranslogTests extends ESTestCase { assertEquals("payload missmatch", i, Integer.parseInt(next.getSource().source.utf8ToString())); } } - tlog.add(new Translog.Index("test", "" + 1, 1, Integer.toString(1).getBytes(Charset.forName("UTF-8")))); + tlog.add(new Translog.Index("test", "" + 1, 1, primaryTerm.get(), Integer.toString(1).getBytes(Charset.forName("UTF-8")))); } try { - Translog tlog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED); + Translog tlog = new 
Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); fail("file already exists?"); } catch (TranslogException ex) { // all is well @@ -2289,7 +2293,7 @@ public class TranslogTests extends ESTestCase { LineFileDocs lineFileDocs = new LineFileDocs(random()); //writes pretty big docs so we cross buffer boarders regularly for (int opsAdded = 0; opsAdded < numOps; opsAdded++) { String doc = lineFileDocs.nextDoc().toString(); - failableTLog.add(new Translog.Index("test", "" + opsAdded, opsAdded, doc.getBytes(Charset.forName("UTF-8")))); + failableTLog.add(new Translog.Index("test", "" + opsAdded, opsAdded, primaryTerm.get(), doc.getBytes(Charset.forName("UTF-8")))); unsynced.add(doc); if (randomBoolean()) { failableTLog.sync(); @@ -2361,9 +2365,9 @@ public class TranslogTests extends ESTestCase { deletionPolicy.setMinTranslogGenerationForRecovery(minGenForRecovery); if (generationUUID == null) { // we never managed to successfully create a translog, make it - generationUUID = Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId); + generationUUID = Translog.createEmptyTranslog(config.getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); } - try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED); + try (Translog translog = new Translog(config, generationUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); Translog.Snapshot snapshot = translog.newSnapshotFromGen(minGenForRecovery)) { assertEquals(syncedDocs.size(), snapshot.totalOperations()); for (int i = 0; i < syncedDocs.size(); i++) { @@ -2422,20 +2426,20 @@ public class TranslogTests extends ESTestCase { * Tests that closing views after the translog is fine and we can reopen the translog */ public void testPendingDelete() throws IOException { - translog.add(new Translog.Index("test", "1", 0, new byte[]{1})); + translog.add(new Translog.Index("test", "1", 0, primaryTerm.get(), new byte[]{1})); translog.rollGeneration(); TranslogConfig config = translog.getConfig(); final String translogUUID = translog.getTranslogUUID(); final TranslogDeletionPolicy deletionPolicy = createTranslogDeletionPolicy(config.getIndexSettings()); translog.close(); - translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED); - translog.add(new Translog.Index("test", "2", 1, new byte[]{2})); + translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); + translog.add(new Translog.Index("test", "2", 1, primaryTerm.get(), new byte[]{2})); translog.rollGeneration(); Closeable lock = translog.acquireRetentionLock(); - translog.add(new Translog.Index("test", "3", 2, new byte[]{3})); + translog.add(new Translog.Index("test", "3", 2, primaryTerm.get(), new byte[]{3})); translog.close(); IOUtils.close(lock); - translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED); + translog = new Translog(config, translogUUID, deletionPolicy, () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTerm::get); } public static Translog.Location randomTranslogLocation() { @@ -2500,20 +2504,31 @@ public class TranslogTests extends ESTestCase { final int rolls = randomIntBetween(1, 16); int totalOperations = 0; int seqNo = 0; + final List primaryTerms = new ArrayList<>(); + primaryTerms.add(primaryTerm.get()); // We always create an empty 
translog. + primaryTerms.add(primaryTerm.get()); for (int i = 0; i < rolls; i++) { final int operations = randomIntBetween(1, 128); for (int j = 0; j < operations; j++) { - translog.add(new Translog.NoOp(seqNo++, 0, "test")); + translog.add(new Translog.NoOp(seqNo++, primaryTerm.get(), "test")); totalOperations++; } try (ReleasableLock ignored = translog.writeLock.acquire()) { + if (randomBoolean()){ + primaryTerm.incrementAndGet(); + } translog.rollGeneration(); + primaryTerms.add(primaryTerm.get()); } assertThat(translog.currentFileGeneration(), equalTo(generation + i + 1)); + assertThat(translog.getCurrent().getPrimaryTerm(), equalTo(primaryTerm.get())); assertThat(translog.totalOperations(), equalTo(totalOperations)); } for (int i = 0; i <= rolls; i++) { assertFileIsPresent(translog, generation + i); + final List storedPrimaryTerms = Stream.concat(translog.getReaders().stream(), Stream.of(translog.getCurrent())) + .map(t -> t.getPrimaryTerm()).collect(Collectors.toList()); + assertThat(storedPrimaryTerms, equalTo(primaryTerms)); } long minGenForRecovery = randomLongBetween(generation, generation + rolls); commit(translog, minGenForRecovery, generation + rolls); @@ -2616,8 +2631,11 @@ public class TranslogTests extends ESTestCase { final int operations = randomIntBetween(1, 4096); long seqNo = 0; for (int i = 0; i < operations; i++) { - translog.add(new Translog.NoOp(seqNo++, 0, "test'")); + translog.add(new Translog.NoOp(seqNo++, primaryTerm.get(), "test'")); if (rarely()) { + if (rarely()) { + primaryTerm.incrementAndGet(); + } translog.rollGeneration(); } } @@ -2674,7 +2692,7 @@ public class TranslogTests extends ESTestCase { for (int gen = 0; gen < generations; gen++) { final int operations = randomIntBetween(1, 100); for (int i = 0; i < operations; i++) { - Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo.getAndIncrement(), new byte[]{1}); + Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo.getAndIncrement(), primaryTerm.get(), new byte[]{1}); translog.add(op); views.peek().add(op); } @@ -2699,7 +2717,7 @@ public class TranslogTests extends ESTestCase { List batch = LongStream.rangeClosed(0, between(0, 500)).boxed().collect(Collectors.toList()); Randomness.shuffle(batch); for (Long seqNo : batch) { - Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo, new byte[]{1}); + Translog.Index op = new Translog.Index("doc", randomAlphaOfLength(10), seqNo, primaryTerm.get(), new byte[]{1}); translog.add(op); latestOperations.put(op.seqNo(), op); } diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java deleted file mode 100644 index d57373ebfe3..00000000000 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogVersionTests.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.translog; - -import org.elasticsearch.index.seqno.SequenceNumbers; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.nio.channels.FileChannel; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardOpenOption; - -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; - -/** - * Tests for reading old and new translog files - */ -public class TranslogVersionTests extends ESTestCase { - - private void checkFailsToOpen(String file, String expectedMessage) throws IOException { - Path translogFile = getDataPath(file); - assertThat("test file should exist", Files.exists(translogFile), equalTo(true)); - try { - openReader(translogFile, 0); - fail("should be able to open an old translog"); - } catch (IllegalStateException e) { - assertThat(e.getMessage(), containsString(expectedMessage)); - } - - } - - public void testV0LegacyTranslogVersion() throws Exception { - checkFailsToOpen("/org/elasticsearch/index/translog/translog-v0.binary", "pre-1.4 translog"); - } - - public void testV1ChecksummedTranslogVersion() throws Exception { - checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1.binary", "pre-2.0 translog"); - } - - public void testCorruptedTranslogs() throws Exception { - try { - Path translogFile = getDataPath("/org/elasticsearch/index/translog/translog-v1-corrupted-magic.binary"); - assertThat("test file should exist", Files.exists(translogFile), equalTo(true)); - openReader(translogFile, 0); - fail("should have thrown an exception about the header being corrupt"); - } catch (TranslogCorruptedException e) { - assertThat("translog corruption from header: " + e.getMessage(), - e.getMessage().contains("translog looks like version 1 or later, but has corrupted header"), equalTo(true)); - } - - try { - Path translogFile = getDataPath("/org/elasticsearch/index/translog/translog-invalid-first-byte.binary"); - assertThat("test file should exist", Files.exists(translogFile), equalTo(true)); - openReader(translogFile, 0); - fail("should have thrown an exception about the header being corrupt"); - } catch (TranslogCorruptedException e) { - assertThat("translog corruption from header: " + e.getMessage(), - e.getMessage().contains("Invalid first byte in translog file, got: 1, expected 0x00 or 0x3f"), equalTo(true)); - } - - checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-corrupted-body.binary", "pre-2.0 translog"); - } - - public void testTruncatedTranslog() throws Exception { - checkFailsToOpen("/org/elasticsearch/index/translog/translog-v1-truncated.binary", "pre-2.0 translog"); - } - - public TranslogReader openReader(final Path path, final long id) throws IOException { - try (FileChannel channel = FileChannel.open(path, StandardOpenOption.READ)) { - final long minSeqNo = SequenceNumbers.NO_OPS_PERFORMED; - final long maxSeqNo = SequenceNumbers.NO_OPS_PERFORMED; - final Checkpoint checkpoint = - new Checkpoint(Files.size(path), 1, id, minSeqNo, maxSeqNo, SequenceNumbers.UNASSIGNED_SEQ_NO, id); - return 
TranslogReader.open(channel, path, checkpoint, null); - } - } -} diff --git a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java index a914eb435bb..a2149b9d28a 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java @@ -241,7 +241,7 @@ public class FlushIT extends ESIntegTestCase { private void indexDoc(Engine engine, String id) throws IOException { final ParsedDocument doc = InternalEngineTests.createParsedDoc(id, null); - final Engine.IndexResult indexResult = engine.index(new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), doc)); + final Engine.IndexResult indexResult = engine.index(new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), 1L, doc)); assertThat(indexResult.getFailure(), nullValue()); } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 917f5deeac1..babf8518d44 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -268,7 +268,7 @@ public class RecoverySourceHandlerTests extends ESTestCase { final BytesReference source = new BytesArray(new byte[] { 1 }); final ParsedDocument doc = new ParsedDocument(versionField, seqID, id, type, null, Arrays.asList(document), source, XContentType.JSON, null); - return new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), doc); + return new Engine.Index(new Term("_id", Uid.encodeId(doc.id())), randomNonNegativeLong(), doc); } public void testHandleCorruptedIndexOnSendSendFiles() throws Throwable { diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index 49e557c3dde..f46ab7ebbd6 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -200,7 +200,8 @@ public class RecoveryTests extends ESIndexLevelReplicationTestCase { final String historyUUIDtoUse = UUIDs.randomBase64UUID(random()); if (randomBoolean()) { // create a new translog - translogUUIDtoUse = Translog.createEmptyTranslog(replica.shardPath().resolveTranslog(), flushedDocs, replica.shardId()); + translogUUIDtoUse = Translog.createEmptyTranslog(replica.shardPath().resolveTranslog(), flushedDocs, + replica.shardId(), replica.getPrimaryTerm()); translogGenToUse = 1; } else { translogUUIDtoUse = translogGeneration.translogUUID; diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java index 667adf9d990..dea92c2927d 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -84,6 +84,7 @@ import java.nio.file.Path; import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.concurrent.atomic.AtomicLong; import java.util.function.BiFunction; import java.util.function.LongSupplier; import java.util.function.ToLongBiFunction; @@ -110,6 +111,8 @@ public abstract class EngineTestCase extends ESTestCase { protected String 
codecName; protected Path primaryTranslogDir; protected Path replicaTranslogDir; + // A default primary term is used by engine instances created in this test. + protected AtomicLong primaryTerm = new AtomicLong(); protected static void assertVisibleCount(Engine engine, int numDocs) throws IOException { assertVisibleCount(engine, numDocs, true); @@ -130,7 +133,7 @@ public abstract class EngineTestCase extends ESTestCase { @Before public void setUp() throws Exception { super.setUp(); - + primaryTerm.set(randomLongBetween(1, Long.MAX_VALUE)); CodecService codecService = new CodecService(null, logger); String name = Codec.getDefault().getName(); if (Arrays.asList(codecService.availableCodecs()).contains(name)) { @@ -178,7 +181,7 @@ public abstract class EngineTestCase extends ESTestCase { new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(), config.getTranslogRecoveryRunner(), - config.getCircuitBreakerService(), globalCheckpointSupplier); + config.getCircuitBreakerService(), globalCheckpointSupplier, config.getPrimaryTermSupplier()); } public EngineConfig copy(EngineConfig config, Analyzer analyzer) { @@ -187,7 +190,7 @@ public abstract class EngineTestCase extends ESTestCase { new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getExternalRefreshListener(), Collections.emptyList(), config.getIndexSort(), config.getTranslogRecoveryRunner(), - config.getCircuitBreakerService(), config.getGlobalCheckpointSupplier()); + config.getCircuitBreakerService(), config.getGlobalCheckpointSupplier(), config.getPrimaryTermSupplier()); } @Override @@ -260,15 +263,16 @@ public abstract class EngineTestCase extends ESTestCase { return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId)); } - protected Translog createTranslog() throws IOException { - return createTranslog(primaryTranslogDir); + protected Translog createTranslog(LongSupplier primaryTermSupplier) throws IOException { + return createTranslog(primaryTranslogDir, primaryTermSupplier); } - protected Translog createTranslog(Path translogPath) throws IOException { + protected Translog createTranslog(Path translogPath, LongSupplier primaryTermSupplier) throws IOException { TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE); - String translogUUID = Translog.createEmptyTranslog(translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId); - return new Translog( - translogConfig, translogUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.NO_OPS_PERFORMED); + String translogUUID = Translog.createEmptyTranslog(translogPath, SequenceNumbers.NO_OPS_PERFORMED, shardId, + primaryTermSupplier.getAsLong()); + return new Translog(translogConfig, translogUUID, createTranslogDeletionPolicy(INDEX_SETTINGS), + () -> SequenceNumbers.NO_OPS_PERFORMED, primaryTermSupplier); } protected InternalEngine createEngine(Store store, Path translogPath) throws IOException { @@ -366,8 +370,8 @@ public abstract class EngineTestCase extends ESTestCase { final Directory directory = store.directory(); if (Lucene.indexExists(directory) == false) { store.createEmpty(); - final String translogUuid = - 
Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), SequenceNumbers.NO_OPS_PERFORMED, shardId); + final String translogUuid = Translog.createEmptyTranslog(config.getTranslogConfig().getTranslogPath(), + SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm.get()); store.associateIndexWithNewTranslog(translogUuid); } @@ -449,7 +453,7 @@ public abstract class EngineTestCase extends ESTestCase { new NoneCircuitBreakerService(), globalCheckpointSupplier == null ? new ReplicationTracker(shardId, allocationId.getId(), indexSettings, SequenceNumbers.NO_OPS_PERFORMED) : - globalCheckpointSupplier); + globalCheckpointSupplier, primaryTerm::get); return config; } @@ -475,12 +479,12 @@ public abstract class EngineTestCase extends ESTestCase { } protected Engine.Index indexForDoc(ParsedDocument doc) { - return new Engine.Index(newUid(doc), doc); + return new Engine.Index(newUid(doc), primaryTerm.get(), doc); } protected Engine.Index replicaIndexForDoc(ParsedDocument doc, long version, long seqNo, boolean isRetry) { - return new Engine.Index(newUid(doc), doc, seqNo, 1, version, VersionType.EXTERNAL, + return new Engine.Index(newUid(doc), doc, seqNo, primaryTerm.get(), version, VersionType.EXTERNAL, Engine.Operation.Origin.REPLICA, System.nanoTime(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, isRetry); } From 183ba9a7e56bf7ba89eaa650e270ae6b2962e0a4 Mon Sep 17 00:00:00 2001 From: Sachin Frayne <32795683+antomos@users.noreply.github.com> Date: Fri, 13 Apr 2018 02:16:37 +0100 Subject: [PATCH 07/14] Fix typo in max number of threads check docs (#29469) Historically, the bootstrap checks used 2048 as the minimum limit for the maximum number of threads. This limit was guided by the fact that the number of processors was artificially capped at 32. This limit was removed in 6.0.0 and the minimum limit was raised to 4096 to accommodate this. However, the docs were not updated and this commit addresses that miss. --- docs/reference/setup/bootstrap-checks.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/setup/bootstrap-checks.asciidoc b/docs/reference/setup/bootstrap-checks.asciidoc index cbeb10c8c85..4670bb87580 100644 --- a/docs/reference/setup/bootstrap-checks.asciidoc +++ b/docs/reference/setup/bootstrap-checks.asciidoc @@ -114,7 +114,7 @@ that the Elasticsearch process has the rights to create enough threads under normal use. This check is enforced only on Linux. If you are on Linux, to pass the maximum number of threads check, you must configure your system to allow the Elasticsearch process the ability to create at -least 2048 threads. This can be done via `/etc/security/limits.conf` +least 4096 threads. This can be done via `/etc/security/limits.conf` using the `nproc` setting (note that you might have to increase the limits for the `root` user too). From d44bbb7602d11563434502d8fab2f9ab4628bdd2 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Thu, 12 Apr 2018 22:33:38 -0400 Subject: [PATCH 08/14] Fix auto-generated ID example format (#29461) This commit fixes the format of an example auto-generated ID in the docs where the format misleadingly looked like a UUID rather than a base64 ID. 
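As a sanity check on the corrected example: `W0tpsmIBdwcYyG50zbta` is 20 URL-safe base64 characters, which decode to 15 bytes (120 bits), whereas a textual UUID is 36 characters with dashes, so the two formats are easy to tell apart. A minimal JDK-only verification (illustrative, not part of the patch):

```java
import java.util.Base64;

public class AutoIdShape {
    public static void main(String[] args) {
        // 20 base64url characters decode to exactly 15 bytes (120 bits).
        byte[] raw = Base64.getUrlDecoder().decode("W0tpsmIBdwcYyG50zbta");
        System.out.println(raw.length); // prints 15
    }
}
```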
--- docs/reference/docs/index_.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/docs/index_.asciidoc b/docs/reference/docs/index_.asciidoc index fe1ebf47396..01eb0ad6adf 100644 --- a/docs/reference/docs/index_.asciidoc +++ b/docs/reference/docs/index_.asciidoc @@ -229,14 +229,14 @@ The result of the above index operation is: }, "_index" : "twitter", "_type" : "_doc", - "_id" : "6a8ca01c-7896-48e9-81cc-9f70661fcb32", + "_id" : "W0tpsmIBdwcYyG50zbta", "_version" : 1, "_seq_no" : 0, "_primary_term" : 1, "result": "created" } -------------------------------------------------- -// TESTRESPONSE[s/6a8ca01c-7896-48e9-81cc-9f70661fcb32/$body._id/ s/"successful" : 2/"successful" : 1/] +// TESTRESPONSE[s/W0tpsmIBdwcYyG50zbta/$body._id/ s/"successful" : 2/"successful" : 1/] [float] [[index-routing]] From ebd6b5b7ba285567276c1634f6dad15d723d8002 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Fri, 13 Apr 2018 09:07:51 +0200 Subject: [PATCH 09/14] Deprecate filtering on `_type`. (#29468) As indices are only allowed to have one type now, and types are going away in the future, we should deprecate filtering by `_type`. Relates #15613 --- docs/painless/painless-getting-started.asciidoc | 12 ++++++------ .../bucket/significanttext-aggregation.asciidoc | 10 +++++----- .../metrics/percentile-rank-aggregation.asciidoc | 2 +- docs/reference/docs/delete-by-query.asciidoc | 8 ++++---- docs/reference/docs/update-by-query.asciidoc | 8 ++++---- docs/reference/search/search.asciidoc | 12 +----------- .../rest-api-spec/test/stats/20_empty_bucket.yml | 2 -- .../test/stats/30_single_value_field.yml | 6 ------ .../test/stats/40_multi_value_field.yml | 7 ------- .../rest-api-spec/test/create/60_refresh.yml | 4 ---- .../rest-api-spec/test/delete/50_refresh.yml | 7 ------- .../rest-api-spec/test/index/60_refresh.yml | 4 ---- .../rest-api-spec/test/indices.rollover/10_basic.yml | 1 - .../rest-api-spec/test/indices.sort/10_basic.yml | 5 ----- .../resources/rest-api-spec/test/mlt/10_basic.yml | 1 - .../resources/rest-api-spec/test/mlt/20_docs.yml | 1 - .../resources/rest-api-spec/test/mlt/30_unlike.yml | 1 - .../test/search.aggregation/30_sig_terms.yml | 2 -- .../test/search.aggregation/90_sig_text.yml | 4 ---- .../test/search/110_field_collapsing.yml | 10 ---------- .../rest-api-spec/test/search/20_default_values.yml | 3 --- .../rest-api-spec/test/search/90_search_after.yml | 4 ---- .../rest-api-spec/test/update/60_refresh.yml | 5 ----- .../elasticsearch/action/search/SearchRequest.java | 2 ++ .../action/search/SearchRequestBuilder.java | 2 ++ .../org/elasticsearch/index/query/QueryBuilders.java | 2 ++ .../elasticsearch/index/query/TypeQueryBuilder.java | 4 ++++ .../rest/action/search/RestSearchAction.java | 9 ++++++++- .../index/query/TypeQueryBuilderTests.java | 12 ++++++++++++ 29 files changed, 51 insertions(+), 99 deletions(-) diff --git a/docs/painless/painless-getting-started.asciidoc b/docs/painless/painless-getting-started.asciidoc index e82e14b0438..8cf163d55d7 100644 --- a/docs/painless/painless-getting-started.asciidoc +++ b/docs/painless/painless-getting-started.asciidoc @@ -239,7 +239,7 @@ their last name: [source,js] ---------------------------------------------------------------- -POST hockey/player/_update_by_query +POST hockey/_update_by_query { "script": { "lang": "painless", @@ -260,7 +260,7 @@ names start with a consonant and end with a vowel: [source,js] ---------------------------------------------------------------- -POST hockey/player/_update_by_query 
+POST hockey/_update_by_query { "script": { "lang": "painless", @@ -281,7 +281,7 @@ remove all of the vowels in all of their last names: [source,js] ---------------------------------------------------------------- -POST hockey/player/_update_by_query +POST hockey/_update_by_query { "script": { "lang": "painless", @@ -297,7 +297,7 @@ method so it supports `$1` and `\1` for replacements: [source,js] ---------------------------------------------------------------- -POST hockey/player/_update_by_query +POST hockey/_update_by_query { "script": { "lang": "painless", @@ -319,7 +319,7 @@ This will make all of the vowels in the hockey player's last names upper case: [source,js] ---------------------------------------------------------------- -POST hockey/player/_update_by_query +POST hockey/_update_by_query { "script": { "lang": "painless", @@ -337,7 +337,7 @@ last names upper case: [source,js] ---------------------------------------------------------------- -POST hockey/player/_update_by_query +POST hockey/_update_by_query { "script": { "lang": "painless", diff --git a/docs/reference/aggregations/bucket/significanttext-aggregation.asciidoc b/docs/reference/aggregations/bucket/significanttext-aggregation.asciidoc index 11dd0ea7b99..fa4d94c2327 100644 --- a/docs/reference/aggregations/bucket/significanttext-aggregation.asciidoc +++ b/docs/reference/aggregations/bucket/significanttext-aggregation.asciidoc @@ -38,7 +38,7 @@ Example: [source,js] -------------------------------------------------- -GET news/article/_search +GET news/_search { "query" : { "match" : {"content" : "Bird flu"} @@ -153,7 +153,7 @@ We can drill down into examples of these documents to see why pozmantier is conn [source,js] -------------------------------------------------- -GET news/article/_search +GET news/_search { "query": { "simple_query_string": { @@ -221,7 +221,7 @@ with the `filter_duplicate_text` setting turned on: [source,js] -------------------------------------------------- -GET news/article/_search +GET news/_search { "query": { "match": { @@ -424,7 +424,7 @@ context: [source,js] -------------------------------------------------- -GET news/article/_search +GET news/_search { "query" : { "match" : { @@ -463,7 +463,7 @@ will be analyzed using the `source_fields` parameter: [source,js] -------------------------------------------------- -GET news/article/_search +GET news/_search { "query" : { "match" : { diff --git a/docs/reference/aggregations/metrics/percentile-rank-aggregation.asciidoc b/docs/reference/aggregations/metrics/percentile-rank-aggregation.asciidoc index da5595adfbc..3fd0d21ae4e 100644 --- a/docs/reference/aggregations/metrics/percentile-rank-aggregation.asciidoc +++ b/docs/reference/aggregations/metrics/percentile-rank-aggregation.asciidoc @@ -217,7 +217,7 @@ had a value. 
[source,js] -------------------------------------------------- -GET latency/data/_search +GET latency/_search { "size": 0, "aggs" : { diff --git a/docs/reference/docs/delete-by-query.asciidoc b/docs/reference/docs/delete-by-query.asciidoc index 1ca0f35e499..be015a811e9 100644 --- a/docs/reference/docs/delete-by-query.asciidoc +++ b/docs/reference/docs/delete-by-query.asciidoc @@ -75,7 +75,7 @@ Back to the API format, this will delete tweets from the `twitter` index: [source,js] -------------------------------------------------- -POST twitter/_doc/_delete_by_query?conflicts=proceed +POST twitter/_delete_by_query?conflicts=proceed { "query": { "match_all": {} @@ -85,12 +85,12 @@ POST twitter/_doc/_delete_by_query?conflicts=proceed // CONSOLE // TEST[setup:twitter] -It's also possible to delete documents of multiple indexes and multiple -types at once, just like the search API: +It's also possible to delete documents of multiple indexes at once, just like +the search API: [source,js] -------------------------------------------------- -POST twitter,blog/_docs,post/_delete_by_query +POST twitter,blog/_delete_by_query { "query": { "match_all": {} diff --git a/docs/reference/docs/update-by-query.asciidoc b/docs/reference/docs/update-by-query.asciidoc index 5f28443cf9b..482f3d62f5d 100644 --- a/docs/reference/docs/update-by-query.asciidoc +++ b/docs/reference/docs/update-by-query.asciidoc @@ -67,7 +67,7 @@ Back to the API format, this will update tweets from the `twitter` index: [source,js] -------------------------------------------------- -POST twitter/_doc/_update_by_query?conflicts=proceed +POST twitter/_update_by_query?conflicts=proceed -------------------------------------------------- // CONSOLE // TEST[setup:twitter] @@ -145,12 +145,12 @@ This API doesn't allow you to move the documents it touches, just modify their source. This is intentional! We've made no provisions for removing the document from its original location. -It's also possible to do this whole thing on multiple indexes and multiple -types at once, just like the search API: +It's also possible to do this whole thing on multiple indexes at once, just +like the search API: [source,js] -------------------------------------------------- -POST twitter,blog/_doc,post/_update_by_query +POST twitter,blog/_update_by_query -------------------------------------------------- // CONSOLE // TEST[s/^/PUT twitter\nPUT blog\n/] diff --git a/docs/reference/search/search.asciidoc b/docs/reference/search/search.asciidoc index d507d5001f6..b8e3da976d6 100644 --- a/docs/reference/search/search.asciidoc +++ b/docs/reference/search/search.asciidoc @@ -12,8 +12,7 @@ that match the query. The query can either be provided using a simple All search APIs can be applied across multiple types within an index, and across multiple indices with support for the <>. 
For -example, we can search on all documents across all types within the -twitter index: +example, we can search on all documents within the twitter index: [source,js] -------------------------------------------------- @@ -22,15 +21,6 @@ GET /twitter/_search?q=user:kimchy // CONSOLE // TEST[setup:twitter] -We can also search within specific types: - -[source,js] --------------------------------------------------- -GET /twitter/tweet,user/_search?q=user:kimchy --------------------------------------------------- -// CONSOLE -// TEST[setup:twitter] - We can also search all tweets with a certain tag across several indices (for example, when each user has his own index): diff --git a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/20_empty_bucket.yml b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/20_empty_bucket.yml index 5ed2fe542ee..ad29728a6bd 100644 --- a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/20_empty_bucket.yml +++ b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/20_empty_bucket.yml @@ -35,14 +35,12 @@ - do: search: index: empty_bucket_idx - type: test - match: {hits.total: 2} - do: search: index: empty_bucket_idx - type: test body: {"aggs": {"histo": {"histogram": {"field": "val1", "interval": 1, "min_doc_count": 0}, "aggs": { "mfs" : { "matrix_stats": {"fields": ["value", "val1"]} } } } } } - match: {hits.total: 2} diff --git a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/30_single_value_field.yml b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/30_single_value_field.yml index fbc73974f8a..abbc22a4f25 100644 --- a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/30_single_value_field.yml +++ b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/30_single_value_field.yml @@ -130,7 +130,6 @@ setup: - do: search: index: unmapped - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "val3"]} } } } - match: {hits.total: 0} @@ -142,7 +141,6 @@ setup: - do: search: index: test - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val3"]} } } } - match: {hits.total: 15} @@ -155,7 +153,6 @@ setup: - do: search: index: [test, unmapped] - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "val3"]} } } } - match: {hits.total: 15} @@ -169,7 +166,6 @@ setup: - do: search: index: [test, unmapped] - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "val3"], "missing" : {"val2" : 10} } } } } - match: {hits.total: 15} @@ -184,7 +180,6 @@ setup: catch: /parsing_exception/ search: index: test - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2"], "script" : { "my_script" : {"source" : "1 + doc['val1'].value", "lang" : "js"} } } } } } --- @@ -194,5 +189,4 @@ setup: catch: /parsing_exception/ search: index: test - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "val3"], "script" : { "my_script" : {"source" : "my_var + doc['val1'].value", "params" : { "my_var" : 1 }, "lang" : "js" } } } } } } diff --git a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/40_multi_value_field.yml b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/40_multi_value_field.yml index dff452d43cf..978c35f8c0b 100644 --- a/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/40_multi_value_field.yml +++ 
b/modules/aggs-matrix-stats/src/test/resources/rest-api-spec/test/stats/40_multi_value_field.yml @@ -130,7 +130,6 @@ setup: - do: search: index: unmapped - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "vals"]} } } } - match: {hits.total: 0} @@ -142,7 +141,6 @@ setup: - do: search: index: test - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "vals"], "mode" : "max"} } } } - match: {hits.total: 15} @@ -156,7 +154,6 @@ setup: - do: search: index: test - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "vals"], "mode" : "min"} } } } - match: {hits.total: 15} @@ -170,7 +167,6 @@ setup: - do: search: index: [test, unmapped] - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "vals"]} } } } - match: {hits.total: 15} @@ -184,7 +180,6 @@ setup: - do: search: index: [test, unmapped] - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val2", "vals"], "missing" : {"val2" : 10, "vals" : 5 } } } } } - match: {hits.total: 15} @@ -199,7 +194,6 @@ setup: catch: /parsing_exception/ search: index: test - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["vals", "val3"], "script" : { "my_script" : {"source" : "1 + doc['val1'].value", "lang" : "js"} } } } } } --- @@ -209,5 +203,4 @@ setup: catch: /parsing_exception/ search: index: test - type: test body: {"aggs": { "mfs" : { "matrix_stats": {"fields": ["val1", "val3", "vals"], "script" : { "my_script" : {"source" : "my_var + doc['val1'].value", "params" : { "my_var" : 1 }, "lang" : "js" } } } } } } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yml index deb5e1e6850..e461691ace2 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/create/60_refresh.yml @@ -18,7 +18,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 1 }} @@ -36,7 +35,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 2 }} @@ -56,7 +54,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 1 }} @@ -76,7 +73,6 @@ - do: search: index: create_60_refresh_1 - type: test body: query: { term: { _id: create_60_refresh_id1 }} - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml index f86c7250a37..ad27bb68601 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/delete/50_refresh.yml @@ -37,7 +37,6 @@ - do: search: index: test_1 - type: test body: query: { terms: { _id: [1,3] }} @@ -52,7 +51,6 @@ - do: search: index: test_1 - type: test body: query: { terms: { _id: [1,3] }} @@ -72,7 +70,6 @@ - do: search: index: test_1 - type: test body: query: { terms: { _id: [1,3] }} @@ -92,7 +89,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 1 }} - match: { hits.total: 1 } @@ -107,7 +103,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 1 }} - match: { hits.total: 0 } @@ -126,7 +121,6 @@ - do: search: index: delete_50_refresh_1 - type: test body: query: { term: { _id: delete_50_refresh_id1 }} - match: { hits.total: 1 } @@ -142,7 +136,6 @@ - do: search: index: delete_50_refresh_1 - type: test body: query: { term: { _id: 
delete_50_refresh_id1 }} - match: { hits.total: 0 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml index 5d20406abee..cd78a4e4282 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/index/60_refresh.yml @@ -19,7 +19,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 1 }} @@ -37,7 +36,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 2 }} @@ -57,7 +55,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 1 }} @@ -77,7 +74,6 @@ - do: search: index: index_60_refresh_1 - type: test body: query: { term: { _id: index_60_refresh_id1 }} - match: { hits.total: 1 } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml index 1d22d4a4c1f..a797edcfa4e 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/10_basic.yml @@ -70,7 +70,6 @@ - do: search: index: logs_search - type: test - match: { hits.total: 1 } - match: { hits.hits.0._index: "logs-000002"} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml index aec3c41672d..550b868ff49 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.sort/10_basic.yml @@ -93,7 +93,6 @@ - do: search: index: test - type: test body: sort: ["rank"] size: 1 @@ -105,7 +104,6 @@ - do: search: index: test - type: test body: sort: ["rank"] query: {"range": { "rank": { "from": 0 } } } @@ -128,7 +126,6 @@ - do: search: index: test - type: test body: sort: _doc @@ -146,7 +143,6 @@ - do: search: index: test - type: test body: sort: ["rank"] query: {"range": { "rank": { "from": 0 } } } @@ -163,7 +159,6 @@ catch: /disabling \[track_total_hits\] is not allowed in a scroll context/ search: index: test - type: test scroll: 1m body: sort: ["rank"] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/10_basic.yml index 31049b07e21..ee577f6a228 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/10_basic.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/10_basic.yml @@ -32,7 +32,6 @@ - do: search: index: test_1 - type: test body: query: more_like_this: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yml index 415a38f00c1..42b6619a5f8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/20_docs.yml @@ -37,7 +37,6 @@ - do: search: index: test_1 - type: test body: query: more_like_this: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/30_unlike.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/30_unlike.yml index 01cd372e8cf..f3d641081e8 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/30_unlike.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/mlt/30_unlike.yml @@ -37,7 +37,6 @@ - do: search: index: 
test_1 - type: test body: query: more_like_this: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml index a708ff19d7e..fe8c33926d0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/30_sig_terms.yml @@ -67,14 +67,12 @@ - do: search: index: goodbad - type: doc - match: {hits.total: 7} - do: search: index: goodbad - type: doc body: {"aggs": {"class": {"terms": {"field": "class"},"aggs": {"sig_terms": {"significant_terms": {"field": "text"}}}}}} - match: {aggregations.class.buckets.0.sig_terms.buckets.0.key: "bad"} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text.yml index bfbf171e8cc..6f368463aa0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/90_sig_text.yml @@ -72,14 +72,12 @@ - do: search: index: goodbad - type: doc - match: {hits.total: 7} - do: search: index: goodbad - type: doc body: {"aggs": {"class": {"terms": {"field": "class"},"aggs": {"sig_text": {"significant_text": {"field": "text"}}}}}} - match: {aggregations.class.buckets.0.sig_text.buckets.0.key: "bad"} @@ -159,14 +157,12 @@ - do: search: index: goodbad - type: doc - match: {hits.total: 7} - do: search: index: goodbad - type: doc body: {"aggs": {"class": {"terms": {"field": "class"},"aggs": {"sig_text": {"significant_text": {"field": "text", "filter_duplicate_text": true}}}}}} - match: {aggregations.class.buckets.0.sig_text.buckets.0.key: "bad"} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml index 1ae9c48e59c..a7998a0b2f9 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/110_field_collapsing.yml @@ -64,7 +64,6 @@ setup: - do: search: index: test - type: test body: collapse: { field: numeric_group } sort: [{ sort: desc }] @@ -100,7 +99,6 @@ setup: - do: search: index: test - type: test body: from: 2 collapse: { field: numeric_group } @@ -125,7 +123,6 @@ setup: - do: search: index: test - type: test body: collapse: { field: numeric_group, inner_hits: { name: sub_hits, size: 2, sort: [{ sort: asc }] } } sort: [{ sort: desc }] @@ -169,7 +166,6 @@ setup: - do: search: index: test - type: test body: collapse: { field: numeric_group, max_concurrent_group_searches: 10, inner_hits: { name: sub_hits, size: 2, sort: [{ sort: asc }] } } sort: [{ sort: desc }] @@ -215,7 +211,6 @@ setup: catch: /cannot use \`collapse\` in a scroll context/ search: index: test - type: test scroll: 1s body: collapse: { field: numeric_group } @@ -231,7 +226,6 @@ setup: catch: /cannot use \`collapse\` in conjunction with \`search_after\`/ search: index: test - type: test body: collapse: { field: numeric_group } search_after: [6] @@ -248,7 +242,6 @@ setup: catch: /cannot use \`collapse\` in conjunction with \`rescore\`/ search: index: test - type: test body: collapse: { field: numeric_group } rescore: @@ -269,7 +262,6 @@ setup: - do: search: index: test - type: test body: size: 0 collapse: { 
field: numeric_group, inner_hits: { name: sub_hits, size: 1} } @@ -288,7 +280,6 @@ setup: - do: search: index: test - type: test body: collapse: field: numeric_group @@ -345,7 +336,6 @@ setup: - do: search: index: test - type: test body: collapse: { field: numeric_group, inner_hits: { name: sub_hits, version: true, size: 2, sort: [{ sort: asc }] } } sort: [{ sort: desc }] diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/20_default_values.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/20_default_values.yml index 52fbd191853..359d5d80c68 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/20_default_values.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/20_default_values.yml @@ -29,7 +29,6 @@ setup: - do: search: index: _all - type: test body: query: match: @@ -40,7 +39,6 @@ setup: - do: search: index: test_1 - type: test body: query: match: @@ -54,7 +52,6 @@ setup: - do: search: index: test_2 - type: test body: query: match: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml index 920f55ae1be..3392adb50ac 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search/90_search_after.yml @@ -36,7 +36,6 @@ setup: - do: search: index: test - type: test body: size: 1 query: @@ -54,7 +53,6 @@ setup: - do: search: index: test - type: test body: size: 1 query: @@ -73,7 +71,6 @@ setup: - do: search: index: test - type: test body: size: 1 query: @@ -92,7 +89,6 @@ setup: - do: search: index: test - type: test body: size: 1 query: diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml index 62d8fd125ff..8ac1568a127 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/update/60_refresh.yml @@ -21,7 +21,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 1 }} @@ -41,7 +40,6 @@ - do: search: index: test_1 - type: test body: query: { term: { _id: 2 }} @@ -71,7 +69,6 @@ - do: search: index: test_1 - type: test body: query: { term: { cat: dog }} @@ -91,7 +88,6 @@ - do: search: index: update_60_refresh_1 - type: test body: query: { term: { _id: update_60_refresh_id1 }} - match: { hits.total: 1 } @@ -109,7 +105,6 @@ - do: search: index: update_60_refresh_1 - type: test body: query: { match: { test: asdf } } - match: { hits.total: 1 } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java index 8df82279cb2..a7be0f41ffb 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequest.java @@ -224,7 +224,9 @@ public final class SearchRequest extends ActionRequest implements IndicesRequest /** * The document types to execute the search against. Defaults to be executed against * all types. + * @deprecated Types are going away, prefer filtering on a field. */ + @Deprecated public SearchRequest types(String...
types) { Objects.requireNonNull(types, "types must not be null"); for (String type : types) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java index 5342b55d542..1ddecf13315 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchRequestBuilder.java @@ -61,7 +61,9 @@ public class SearchRequestBuilder extends ActionRequestBuilder { public static final String NAME = "type"; private static final ParseField VALUE_FIELD = new ParseField("value"); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(TypeQueryBuilder.class)); private final String type; @@ -125,6 +128,7 @@ public class TypeQueryBuilder extends AbstractQueryBuilder { @Override protected Query doToQuery(QueryShardContext context) throws IOException { + DEPRECATION_LOGGER.deprecated("The [type] query is deprecated, filter on a field instead."); //LUCENE 4 UPGRADE document mapper should use bytesref as well? DocumentMapper documentMapper = context.getMapperService().documentMapper(type); if (documentMapper == null) { diff --git a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java index 184a8d364c6..6f0c033a0cf 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/search/RestSearchAction.java @@ -23,6 +23,8 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.index.query.QueryBuilder; @@ -55,6 +57,7 @@ public class RestSearchAction extends BaseRestHandler { public static final String TYPED_KEYS_PARAM = "typed_keys"; private static final Set RESPONSE_PARAMS = Collections.singleton(TYPED_KEYS_PARAM); + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(RestSearchAction.class)); public RestSearchAction(Settings settings, RestController controller) { super(settings); @@ -147,7 +150,11 @@ public class RestSearchAction extends BaseRestHandler { searchRequest.scroll(new Scroll(parseTimeValue(scroll, null, "scroll"))); } - searchRequest.types(Strings.splitStringByCommaToArray(request.param("type"))); + String types = request.param("type"); + if (types != null) { + DEPRECATION_LOGGER.deprecated("The {index}/{type}/_search endpoint is deprecated, use {index}/_search instead"); + } + searchRequest.types(Strings.splitStringByCommaToArray(types)); searchRequest.routing(request.param("routing")); searchRequest.preference(request.param("preference")); searchRequest.indicesOptions(IndicesOptions.fromRequest(request, searchRequest.indicesOptions())); diff --git a/server/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java index 9d6d1d8aa90..b75319b15c3 100644 --- a/server/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java +++ 
b/server/src/test/java/org/elasticsearch/index/query/TypeQueryBuilderTests.java @@ -71,4 +71,16 @@ public class TypeQueryBuilderTests extends AbstractQueryTestCase Date: Fri, 13 Apr 2018 09:08:45 +0200 Subject: [PATCH 10/14] Make index APIs work without types. (#29479) Unlike the `indices.create`, `indices.get_mapping` and `indices.put_mapping` APIs, the index APIs do not need the `include_type_name` option, they can work with and without types without knowing whether types are being used. Internally, `_doc` is used as a type if no type is provided, like for the `indices.put_mapping` API. --- .../resources/rest-api-spec/api/index.json | 3 +- .../test/indices.put_mapping/20_no_types.yml | 39 +++++++++++++++++++ .../rest/action/document/RestBulkAction.java | 3 +- 3 files changed, 42 insertions(+), 3 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json index a58598b3bb3..574206a0dc3 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/index.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/index.json @@ -4,7 +4,7 @@ "methods": ["POST", "PUT"], "url": { "path": "/{index}/{type}", - "paths": ["/{index}/{type}", "/{index}/{type}/{id}"], + "paths": ["/{index}/{type}", "/{index}/{type}/{id}", "/{index}/_doc/{id}", "/{index}/_doc"], "parts": { "id": { "type" : "string", @@ -17,7 +17,6 @@ }, "type": { "type" : "string", - "required" : true, "description" : "The type of the document" } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_no_types.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_no_types.yml index c10f73d58ae..40effe01b08 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_no_types.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.put_mapping/20_no_types.yml @@ -39,6 +39,45 @@ - match: { index.mappings.properties.foo.type: "keyword" } - match: { index.mappings.properties.bar.type: "float" } +# Explicit id + - do: + index: + index: index + id: 1 + body: { foo: bar } + +# Implicit id + - do: + index: + index: index + body: { foo: bar } + +# Bulk with explicit id + - do: + bulk: + index: index + body: | + { "index": { "_id": "2" } } + { "doc": { "foo": "baz" } } + +# Bulk with implicit id + - do: + bulk: + index: index + body: | + { "index": { } } + { "doc": { "foo": "baz" } } + + - do: + indices.refresh: + index: index + + - do: + count: + index: index + + - match: { count: 4 } + --- "PUT mapping with a type and include_type_name: false": diff --git a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java index 671917c380c..f898a8d6c63 100644 --- a/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java +++ b/server/src/main/java/org/elasticsearch/rest/action/document/RestBulkAction.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.rest.BaseRestHandler; import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; @@ -76,7 +77,7 @@ public class RestBulkAction extends BaseRestHandler { public RestChannelConsumer prepareRequest(final
RestRequest request, final NodeClient client) throws IOException { BulkRequest bulkRequest = Requests.bulkRequest(); String defaultIndex = request.param("index"); - String defaultType = request.param("type"); + String defaultType = request.param("type", MapperService.SINGLE_MAPPING_NAME); String defaultRouting = request.param("routing"); FetchSourceContext defaultFetchSourceContext = FetchSourceContext.parseFromRestRequest(request); String fieldsParam = request.param("fields"); From 5dcfdb09cb9c63c2350f85be606466103075e087 Mon Sep 17 00:00:00 2001 From: Mayya Sharipova Date: Fri, 13 Apr 2018 05:55:33 -0400 Subject: [PATCH 11/14] Control max size and count of warning headers (#28427) Control max size and count of warning headers Add a static persistent cluster level setting "http.max_warning_header_count" to control the maximum number of warning headers in client HTTP responses. Defaults to unbounded. Add a static persistent cluster level setting "http.max_warning_header_size" to control the maximum total size of warning headers in client HTTP responses. Defaults to unbounded. With every warning header that exceeds these limits, a message will be logged in the main ES log, and any more warning headers for this response will be ignored. --- docs/reference/modules/cluster/misc.asciidoc | 2 +- docs/reference/modules/http.asciidoc | 8 ++- .../common/settings/ClusterSettings.java | 2 + .../common/util/concurrent/ThreadContext.java | 68 ++++++++++++++++--- .../http/HttpTransportSettings.java | 5 +- .../java/org/elasticsearch/node/Node.java | 1 + .../logging/DeprecationLoggerTests.java | 55 +++++++++++++++ 7 files changed, 130 insertions(+), 11 deletions(-) diff --git a/docs/reference/modules/cluster/misc.asciidoc b/docs/reference/modules/cluster/misc.asciidoc index 837cfcc43eb..96867494868 100644 --- a/docs/reference/modules/cluster/misc.asciidoc +++ b/docs/reference/modules/cluster/misc.asciidoc @@ -82,4 +82,4 @@ Enable or disable allocation for persistent tasks: This setting does not affect the persistent tasks that are already being executed. Only newly created persistent tasks, or tasks that must be reassigned (after a node left the cluster, for example), are impacted by this setting. --- +-- \ No newline at end of file diff --git a/docs/reference/modules/http.asciidoc b/docs/reference/modules/http.asciidoc index 920f62043cf..c69d4991583 100644 --- a/docs/reference/modules/http.asciidoc +++ b/docs/reference/modules/http.asciidoc @@ -20,7 +20,7 @@ http://en.wikipedia.org/wiki/Chunked_transfer_encoding[HTTP chunking]. The settings in the table below can be configured for HTTP. Note that none of them are dynamically updatable so for them to take effect they should be set in -`elasticsearch.yml`. +the Elasticsearch <>. [cols="<,<",options="header",] |======================================================================= @@ -100,6 +100,12 @@ simple message will be returned. Defaults to `true` |`http.pipelining.max_events` |The maximum number of events to be queued up in memory before a HTTP connection is closed, defaults to `10000`. +|`http.max_warning_header_count` |The maximum number of warning headers in + client HTTP responses, defaults to unbounded. + +|`http.max_warning_header_size` |The maximum total size of warning headers in +client HTTP responses, defaults to unbounded. 
+ |======================================================================= It also uses the common diff --git a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java index ced99fc8065..45eb3cf45ef 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/ClusterSettings.java @@ -245,6 +245,8 @@ public final class ClusterSettings extends AbstractScopedSettings { HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH, HttpTransportSettings.SETTING_HTTP_MAX_CHUNK_SIZE, HttpTransportSettings.SETTING_HTTP_MAX_HEADER_SIZE, + HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT, + HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE, HttpTransportSettings.SETTING_HTTP_MAX_INITIAL_LINE_LENGTH, HttpTransportSettings.SETTING_HTTP_READ_TIMEOUT, HttpTransportSettings.SETTING_HTTP_RESET_COOKIES, diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index 8f950c5434b..901c6425d71 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -23,10 +23,16 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.http.HttpTransportSettings; + +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT; +import static org.elasticsearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE; import java.io.Closeable; import java.io.IOException; @@ -39,13 +45,14 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.CancellationException; import java.util.concurrent.ExecutionException; -import java.util.concurrent.FutureTask; import java.util.concurrent.RunnableFuture; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; +import java.nio.charset.StandardCharsets; + /** * A ThreadContext is a map of string headers and a transient map of keyed objects that are associated with @@ -81,6 +88,8 @@ public final class ThreadContext implements Closeable, Writeable { private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct(); private final Map defaultHeader; private final ContextThreadLocal threadLocal; + private final int maxWarningHeaderCount; + private final long maxWarningHeaderSize; /** * Creates a new ThreadContext instance @@ -98,6 +107,8 @@ public final class ThreadContext implements Closeable, Writeable { this.defaultHeader = Collections.unmodifiableMap(defaultHeader); } threadLocal = new ContextThreadLocal(); + this.maxWarningHeaderCount = SETTING_HTTP_MAX_WARNING_HEADER_COUNT.get(settings); + this.maxWarningHeaderSize = 
SETTING_HTTP_MAX_WARNING_HEADER_SIZE.get(settings).getBytes(); } @Override @@ -282,7 +293,7 @@ public final class ThreadContext implements Closeable, Writeable { * @param uniqueValue the function that produces de-duplication values */ public void addResponseHeader(final String key, final String value, final Function uniqueValue) { - threadLocal.set(threadLocal.get().putResponse(key, value, uniqueValue)); + threadLocal.set(threadLocal.get().putResponse(key, value, uniqueValue, maxWarningHeaderCount, maxWarningHeaderSize)); } /** @@ -359,7 +370,7 @@ public final class ThreadContext implements Closeable, Writeable { private final Map transientHeaders; private final Map> responseHeaders; private final boolean isSystemContext; - + private long warningHeadersSize; //saving current warning headers' size not to recalculate the size with every new warning header private ThreadContextStruct(StreamInput in) throws IOException { final int numRequest = in.readVInt(); Map requestHeaders = numRequest == 0 ? Collections.emptyMap() : new HashMap<>(numRequest); @@ -371,6 +382,7 @@ public final class ThreadContext implements Closeable, Writeable { this.responseHeaders = in.readMapOfLists(StreamInput::readString, StreamInput::readString); this.transientHeaders = Collections.emptyMap(); isSystemContext = false; // we never serialize this it's a transient flag + this.warningHeadersSize = 0L; } private ThreadContextStruct setSystemContext() { @@ -387,6 +399,18 @@ public final class ThreadContext implements Closeable, Writeable { this.responseHeaders = responseHeaders; this.transientHeaders = transientHeaders; this.isSystemContext = isSystemContext; + this.warningHeadersSize = 0L; + } + + private ThreadContextStruct(Map requestHeaders, + Map> responseHeaders, + Map transientHeaders, boolean isSystemContext, + long warningHeadersSize) { + this.requestHeaders = requestHeaders; + this.responseHeaders = responseHeaders; + this.transientHeaders = transientHeaders; + this.isSystemContext = isSystemContext; + this.warningHeadersSize = warningHeadersSize; } /** @@ -440,30 +464,58 @@ public final class ThreadContext implements Closeable, Writeable { return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext); } - private ThreadContextStruct putResponse(final String key, final String value, final Function uniqueValue) { + private ThreadContextStruct putResponse(final String key, final String value, final Function uniqueValue, + final int maxWarningHeaderCount, final long maxWarningHeaderSize) { assert value != null; + long newWarningHeaderSize = warningHeadersSize; + //check if we can add another warning header - if max size within limits + if (key.equals("Warning") && (maxWarningHeaderSize != -1)) { //if size is NOT unbounded, check its limits + if (warningHeadersSize > maxWarningHeaderSize) { // if max size has already been reached before + final String message = "Dropping a warning header, as their total size reached the maximum allowed of [" + + maxWarningHeaderSize + "] bytes set in [" + + HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE.getKey() + "]!"; + ESLoggerFactory.getLogger(ThreadContext.class).warn(message); + return this; + } + newWarningHeaderSize += "Warning".getBytes(StandardCharsets.UTF_8).length + value.getBytes(StandardCharsets.UTF_8).length; + if (newWarningHeaderSize > maxWarningHeaderSize) { + final String message = "Dropping a warning header, as their total size reached the maximum allowed of [" + + maxWarningHeaderSize + "] bytes set in [" + + 
HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE.getKey() + "]!"; + ESLoggerFactory.getLogger(ThreadContext.class).warn(message); + return new ThreadContextStruct(requestHeaders, responseHeaders, transientHeaders, isSystemContext, newWarningHeaderSize); + } + } final Map> newResponseHeaders = new HashMap<>(this.responseHeaders); final List existingValues = newResponseHeaders.get(key); - if (existingValues != null) { final Set existingUniqueValues = existingValues.stream().map(uniqueValue).collect(Collectors.toSet()); assert existingValues.size() == existingUniqueValues.size(); if (existingUniqueValues.contains(uniqueValue.apply(value))) { return this; } - final List newValues = new ArrayList<>(existingValues); newValues.add(value); - newResponseHeaders.put(key, Collections.unmodifiableList(newValues)); } else { newResponseHeaders.put(key, Collections.singletonList(value)); } - return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext); + //check if we can add another warning header - if max count within limits + if ((key.equals("Warning")) && (maxWarningHeaderCount != -1)) { //if count is NOT unbounded, check its limits + final int warningHeaderCount = newResponseHeaders.containsKey("Warning") ? newResponseHeaders.get("Warning").size() : 0; + if (warningHeaderCount > maxWarningHeaderCount) { + final String message = "Dropping a warning header, as their total count reached the maximum allowed of [" + + maxWarningHeaderCount + "] set in [" + HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT.getKey() + "]!"; + ESLoggerFactory.getLogger(ThreadContext.class).warn(message); + return this; + } + } + return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext, newWarningHeaderSize); } + private ThreadContextStruct putTransient(String key, Object value) { Map newTransient = new HashMap<>(this.transientHeaders); if (newTransient.putIfAbsent(key, value) != null) { diff --git a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java index 064406f0d38..98451e0c304 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java +++ b/server/src/main/java/org/elasticsearch/http/HttpTransportSettings.java @@ -29,7 +29,6 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import java.util.List; -import java.util.concurrent.TimeUnit; import java.util.function.Function; import static java.util.Collections.emptyList; @@ -93,6 +92,10 @@ public final class HttpTransportSettings { Setting.byteSizeSetting("http.max_chunk_size", new ByteSizeValue(8, ByteSizeUnit.KB), Property.NodeScope); public static final Setting SETTING_HTTP_MAX_HEADER_SIZE = Setting.byteSizeSetting("http.max_header_size", new ByteSizeValue(8, ByteSizeUnit.KB), Property.NodeScope); + public static final Setting SETTING_HTTP_MAX_WARNING_HEADER_COUNT = + Setting.intSetting("http.max_warning_header_count", -1, -1, Property.NodeScope); + public static final Setting SETTING_HTTP_MAX_WARNING_HEADER_SIZE = + Setting.byteSizeSetting("http.max_warning_header_size", new ByteSizeValue(-1), Property.NodeScope); public static final Setting SETTING_HTTP_MAX_INITIAL_LINE_LENGTH = Setting.byteSizeSetting("http.max_initial_line_length", new ByteSizeValue(4, ByteSizeUnit.KB), Property.NodeScope); // don't reset cookies by default, since I don't think we really need to diff --git 
a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 102ae719785..9fa886af8bc 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -93,6 +93,7 @@ import org.elasticsearch.gateway.GatewayModule; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.http.HttpServerTransport; +import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.indices.IndicesService; diff --git a/server/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java b/server/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java index fdb530749e1..490f7961a89 100644 --- a/server/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java +++ b/server/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java @@ -33,6 +33,7 @@ import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.stream.IntStream; +import java.nio.charset.StandardCharsets; import static org.elasticsearch.common.logging.DeprecationLogger.WARNING_HEADER_PATTERN; import static org.elasticsearch.test.hamcrest.RegexMatcher.matches; @@ -246,6 +247,60 @@ public class DeprecationLoggerTests extends ESTestCase { assertThat(DeprecationLogger.encode(s), IsSame.sameInstance(s)); } + + public void testWarningHeaderCountSetting() throws IOException{ + // Test that the number of warning headers don't exceed 'http.max_warning_header_count' + final int maxWarningHeaderCount = 2; + Settings settings = Settings.builder() + .put("http.max_warning_header_count", maxWarningHeaderCount) + .build(); + try (ThreadContext threadContext = new ThreadContext(settings)) { + final Set threadContexts = Collections.singleton(threadContext); + // try to log three warning messages + logger.deprecated(threadContexts, "A simple message 1"); + logger.deprecated(threadContexts, "A simple message 2"); + logger.deprecated(threadContexts, "A simple message 3"); + final Map> responseHeaders = threadContext.getResponseHeaders(); + final List responses = responseHeaders.get("Warning"); + + assertEquals(maxWarningHeaderCount, responses.size()); + assertThat(responses.get(0), warningValueMatcher); + assertThat(responses.get(0), containsString("\"A simple message 1")); + assertThat(responses.get(1), warningValueMatcher); + assertThat(responses.get(1), containsString("\"A simple message 2")); + } + } + + public void testWarningHeaderSizeSetting() throws IOException{ + // Test that the size of warning headers don't exceed 'http.max_warning_header_size' + Settings settings = Settings.builder() + .put("http.max_warning_header_size", "1Kb") + .build(); + + byte [] arr = new byte[300]; + String message1 = new String(arr, StandardCharsets.UTF_8) + "1"; + String message2 = new String(arr, StandardCharsets.UTF_8) + "2"; + String message3 = new String(arr, StandardCharsets.UTF_8) + "3"; + + try (ThreadContext threadContext = new ThreadContext(settings)) { + final Set threadContexts = Collections.singleton(threadContext); + // try to log three warning messages + logger.deprecated(threadContexts, message1); + logger.deprecated(threadContexts, message2); + logger.deprecated(threadContexts, message3); + final Map> responseHeaders = threadContext.getResponseHeaders(); + final List responses 
= responseHeaders.get("Warning"); + + long warningHeadersSize = 0L; + for (String response : responses){ + warningHeadersSize += "Warning".getBytes(StandardCharsets.UTF_8).length + + response.getBytes(StandardCharsets.UTF_8).length; + } + // assert that the size of all warning headers is less or equal to 1Kb + assertTrue(warningHeadersSize <= 1024); + } + } + private String range(int lowerInclusive, int upperInclusive) { return IntStream .range(lowerInclusive, upperInclusive + 1) From 782517b4521f8fd595a3bed5c8a66986ad2ec86b Mon Sep 17 00:00:00 2001 From: Chandan83 Date: Fri, 13 Apr 2018 18:21:03 +0530 Subject: [PATCH 12/14] Adds SpanGapQueryBuilder in the query DSL (#28636) This change adds the support for a `span_gap` query inside the span query DSL. --- .../index/query/SpanNearQueryBuilder.java | 218 +++++++++++++++++- .../elasticsearch/search/SearchModule.java | 2 + .../index/query/SpanGapQueryBuilderTests.java | 127 ++++++++++ .../query/SpanNearQueryBuilderTests.java | 1 + .../search/SearchModuleTests.java | 1 + 5 files changed, 341 insertions(+), 8 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/index/query/SpanGapQueryBuilderTests.java diff --git a/server/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java index 7ff181acb90..d4333fa0bc5 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SpanNearQueryBuilder.java @@ -24,9 +24,11 @@ import org.apache.lucene.search.spans.SpanNearQuery; import org.apache.lucene.search.spans.SpanQuery; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.ParsingException; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentLocation; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -203,18 +205,54 @@ public class SpanNearQueryBuilder extends AbstractQueryBuilder(SpanFirstQueryBuilder.NAME, SpanFirstQueryBuilder::new, SpanFirstQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(SpanNearQueryBuilder.NAME, SpanNearQueryBuilder::new, SpanNearQueryBuilder::fromXContent)); + registerQuery(new QuerySpec<>(SpanGapQueryBuilder.NAME, SpanGapQueryBuilder::new, SpanGapQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(SpanOrQueryBuilder.NAME, SpanOrQueryBuilder::new, SpanOrQueryBuilder::fromXContent)); registerQuery(new QuerySpec<>(MoreLikeThisQueryBuilder.NAME, MoreLikeThisQueryBuilder::new, MoreLikeThisQueryBuilder::fromXContent)); diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanGapQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanGapQueryBuilderTests.java new file mode 100644 index 00000000000..024d43b1a6b --- /dev/null +++ b/server/src/test/java/org/elasticsearch/index/query/SpanGapQueryBuilderTests.java @@ -0,0 +1,127 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.query; + +import org.apache.lucene.search.Query; +import org.apache.lucene.search.spans.SpanBoostQuery; +import org.apache.lucene.search.spans.SpanNearQuery; +import org.apache.lucene.search.spans.SpanQuery; +import org.apache.lucene.search.spans.SpanTermQuery; +import org.elasticsearch.common.ParsingException; +import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.test.AbstractQueryTestCase; + +import java.io.IOException; +import java.util.Iterator; + +import static org.elasticsearch.index.query.SpanNearQueryBuilder.SpanGapQueryBuilder; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.either; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; + +/* + * SpanGapQueryBuilder, unlike other QBs, is not used to build a Query. Therefore, it is not suited + * to test pattern of AbstractQueryTestCase. Since it is only used in SpanNearQueryBuilder, its test cases + * are same as those of later with SpanGapQueryBuilder included as clauses. + */ + +public class SpanGapQueryBuilderTests extends AbstractQueryTestCase { + @Override + protected SpanNearQueryBuilder doCreateTestQueryBuilder() { + SpanTermQueryBuilder[] spanTermQueries = new SpanTermQueryBuilderTests().createSpanTermQueryBuilders(randomIntBetween(1, 6)); + SpanNearQueryBuilder queryBuilder = new SpanNearQueryBuilder(spanTermQueries[0], randomIntBetween(-10, 10)); + for (int i = 1; i < spanTermQueries.length; i++) { + SpanTermQueryBuilder termQB = spanTermQueries[i]; + queryBuilder.addClause(termQB); + if (i % 2 == 1) { + SpanGapQueryBuilder gapQB = new SpanGapQueryBuilder(termQB.fieldName(), randomIntBetween(1,2)); + queryBuilder.addClause(gapQB); + } + } + queryBuilder.inOrder(true); + return queryBuilder; + } + + @Override + protected void doAssertLuceneQuery(SpanNearQueryBuilder queryBuilder, Query query, SearchContext context) throws IOException { + assertThat(query, either(instanceOf(SpanNearQuery.class)) + .or(instanceOf(SpanTermQuery.class)) + .or(instanceOf(SpanBoostQuery.class)) + .or(instanceOf(MatchAllQueryBuilder.class))); + if (query instanceof SpanNearQuery) { + SpanNearQuery spanNearQuery = (SpanNearQuery) query; + assertThat(spanNearQuery.getSlop(), equalTo(queryBuilder.slop())); + assertThat(spanNearQuery.isInOrder(), equalTo(queryBuilder.inOrder())); + assertThat(spanNearQuery.getClauses().length, equalTo(queryBuilder.clauses().size())); + Iterator spanQueryBuilderIterator = queryBuilder.clauses().iterator(); + for (SpanQuery spanQuery : spanNearQuery.getClauses()) { + SpanQueryBuilder spanQB = spanQueryBuilderIterator.next(); + if (spanQB instanceof SpanGapQueryBuilder) continue; + assertThat(spanQuery, equalTo(spanQB.toQuery(context.getQueryShardContext()))); + } + } else if (query instanceof SpanTermQuery || query instanceof SpanBoostQuery) { + assertThat(queryBuilder.clauses().size(), equalTo(1)); + assertThat(query, equalTo(queryBuilder.clauses().get(0).toQuery(context.getQueryShardContext()))); + } + } + + public void 
testIllegalArguments() {
+        IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new SpanGapQueryBuilder(null, 1));
+        assertEquals("[span_gap] field name is null or empty", e.getMessage());
+    }
+
+    public void testFromJson() throws IOException {
+        String json =
+                "{\n" +
+                "  \"span_near\" : {\n" +
+                "    \"clauses\" : [ {\n" +
+                "      \"span_term\" : {\n" +
+                "        \"field\" : {\n" +
+                "          \"value\" : \"value1\",\n" +
+                "          \"boost\" : 1.0\n" +
+                "        }\n" +
+                "      }\n" +
+                "    }, {\n" +
+                "      \"span_gap\" : {\n" +
+                "        \"field\" : 2" +
+                "      }\n" +
+                "    }, {\n" +
+                "      \"span_term\" : {\n" +
+                "        \"field\" : {\n" +
+                "          \"value\" : \"value3\",\n" +
+                "          \"boost\" : 1.0\n" +
+                "        }\n" +
+                "      }\n" +
+                "    } ],\n" +
+                "    \"slop\" : 12,\n" +
+                "    \"in_order\" : false,\n" +
+                "    \"boost\" : 1.0\n" +
+                "  }\n" +
+                "}";
+
+        SpanNearQueryBuilder parsed = (SpanNearQueryBuilder) parseQuery(json);
+        checkGeneratedJson(json, parsed);
+
+        assertEquals(json, 3, parsed.clauses().size());
+        assertEquals(json, 12, parsed.slop());
+        assertEquals(json, false, parsed.inOrder());
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java
index 21b15fe53fa..359793adcf6 100644
--- a/server/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java
+++ b/server/src/test/java/org/elasticsearch/index/query/SpanNearQueryBuilderTests.java
@@ -184,4 +184,5 @@ public class SpanNearQueryBuilderTests extends AbstractQueryTestCase<SpanNearQueryBuilder> {
             () -> parseQuery(json));
         assertThat(e.getMessage(), containsString("[span_near] query does not support [collect_payloads]"));
     }
+
 }
diff --git a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java
index ca5efe02367..78040f5bfb2 100644
--- a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java
+++ b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java
@@ -324,6 +324,7 @@ public class SearchModuleTests extends ModuleTestCase {
         "simple_query_string",
         "span_containing",
         "span_first",
+        "span_gap",
         "span_multi",
         "span_near",
         "span_not",

From eab530ce11390875fa9a9e0aded1c9b27a0be55c Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Fri, 13 Apr 2018 08:49:25 +0200
Subject: [PATCH 13/14] Ensure flush happens on shard idle

This adds 2 test cases verifying that, when a shard goes idle, pending
(uncommitted) segments are committed and unreferenced files are freed.
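The pattern the new IndexShardTests case exercises looks roughly like this
(a condensed sketch of the test below; `shard` stands for the test shard and
all setup and assertions are elided):

    // react to the shard being marked inactive by committing pending segments
    IndexEventListener listener = new IndexEventListener() {
        @Override
        public void onShardInactive(IndexShard indexShard) {
            indexShard.flush(new FlushRequest());
        }
    };
    // after indexing a few docs, force the idle check with a zero timeout;
    // this fires onShardInactive and the flush above commits the segments
    shard.checkIdle(0);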
Relates to #29482
---
 .../admin/indices/create/ShrinkIndexIT.java   | 77 ++++++++++++++++++
 .../ESIndexLevelReplicationTestCase.java      |  2 +-
 .../index/shard/IndexShardTests.java          | 78 ++++++++++++++++++-
 .../BlobStoreRepositoryRestoreTests.java      |  3 +-
 .../index/shard/IndexShardTestCase.java       | 15 ++--
 5 files changed, 163 insertions(+), 12 deletions(-)

diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
index 44f7e38e3ad..e48f151081f 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
@@ -27,6 +27,9 @@ import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.admin.indices.segments.IndexShardSegments;
+import org.elasticsearch.action.admin.indices.segments.IndicesSegmentResponse;
+import org.elasticsearch.action.admin.indices.segments.ShardSegments;
 import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse;
 import org.elasticsearch.action.admin.indices.stats.CommonStats;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
@@ -64,6 +67,7 @@ import org.elasticsearch.test.VersionUtils;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
+import java.util.Map;
 import java.util.stream.IntStream;
 
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
@@ -467,4 +471,77 @@ public class ShrinkIndexIT extends ESIntegTestCase {
         flushAndRefresh();
         assertSortedSegments("target", expectedIndexSort);
     }
+
+
+    public void testShrinkCommitsMergeOnIdle() throws Exception {
+        prepareCreate("source").setSettings(Settings.builder().put(indexSettings())
+            .put("index.number_of_replicas", 0)
+            .put("number_of_shards", 5)).get();
+        for (int i = 0; i < 30; i++) {
+            client().prepareIndex("source", "type")
+                .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
+        }
+        client().admin().indices().prepareFlush("source").get();
+        ImmutableOpenMap<String, DiscoveryNode> dataNodes =
+            client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
+        DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
+        // ensure all shards are allocated, otherwise the ensure green below might not succeed since we require the merge node
+        // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due
+        // to the require._name below.
+        ensureGreen();
+        // relocate all shards to one node such that we can merge it.
+        client().admin().indices().prepareUpdateSettings("source")
+            .setSettings(Settings.builder()
+                .put("index.routing.allocation.require._name", discoveryNodes[0].getName())
+                .put("index.blocks.write", true)).get();
+        ensureGreen();
+        IndicesSegmentResponse sourceStats = client().admin().indices().prepareSegments("source").get();
+
+        // disable rebalancing to be able to capture the right stats. balancing can move the target primary
+        // making it hard to pinpoint the source shards.
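+        // the source segment counts captured above are compared against the shrunken target's segments in the assertBusy check below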
+        client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(
+            EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none"
+        )).get();
+
+        // now merge source into a single shard index
+        assertAcked(client().admin().indices().prepareResizeIndex("source", "target")
+            .setSettings(Settings.builder().put("index.number_of_replicas", 0).build()).get());
+        ensureGreen();
+        ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
+        IndexMetaData target = clusterStateResponse.getState().getMetaData().index("target");
+        client().admin().indices().prepareForceMerge("target").setMaxNumSegments(1).setFlush(false).get();
+        IndicesSegmentResponse targetSegStats = client().admin().indices().prepareSegments("target").get();
+        ShardSegments segmentsStats = targetSegStats.getIndices().get("target").getShards().get(0).getShards()[0];
+        assertTrue(segmentsStats.getNumberOfCommitted() > 0);
+        assertNotEquals(segmentsStats.getSegments().size(), segmentsStats.getNumberOfCommitted());
+
+        Iterable<IndicesService> dataNodeInstances = internalCluster().getDataNodeInstances(IndicesService.class);
+        for (IndicesService service : dataNodeInstances) {
+            if (service.hasIndex(target.getIndex())) {
+                IndexService indexShards = service.indexService(target.getIndex());
+                IndexShard shard = indexShards.getShard(0);
+                assertTrue(shard.isActive());
+                shard.checkIdle(0);
+                assertFalse(shard.isActive());
+            }
+        }
+        assertBusy(() -> {
+            IndicesSegmentResponse targetStats = client().admin().indices().prepareSegments("target").get();
+            ShardSegments targetShardSegments = targetStats.getIndices().get("target").getShards().get(0).getShards()[0];
+            Map<Integer, IndexShardSegments> source = sourceStats.getIndices().get("source").getShards();
+            int numSourceSegments = 0;
+            for (IndexShardSegments s : source.values()) {
+                numSourceSegments += s.getAt(0).getNumberOfCommitted();
+            }
+            assertTrue(targetShardSegments.getSegments().size() < numSourceSegments);
+            assertEquals(targetShardSegments.getNumberOfCommitted(), targetShardSegments.getNumberOfSearch());
+            assertEquals(targetShardSegments.getNumberOfCommitted(), targetShardSegments.getSegments().size());
+            assertEquals(1, targetShardSegments.getSegments().size());
+        });
+
+        // clean up
+        client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put(
+            EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String)null
+        )).get();
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
index ba5b43b1d92..97fc1b528ac 100644
--- a/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
+++ b/server/src/test/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java
@@ -264,7 +264,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase
                 RecoverySource.PeerRecoverySource.INSTANCE);
 
             final IndexShard newReplica =
-                newShard(shardRouting, shardPath, indexMetaData, null, getEngineFactory(shardRouting), () -> {});
+                newShard(shardRouting, shardPath, indexMetaData, null, getEngineFactory(shardRouting), () -> {}, EMPTY_EVENT_LISTENER);
             replicas.add(newReplica);
             updateAllocationIDsOnPrimary();
             return newReplica;
diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index 08f0319d6cf..5506bc515f2 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -72,10 +72,12 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.MergePolicyConfig; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.EngineException; import org.elasticsearch.index.engine.InternalEngine; +import org.elasticsearch.index.engine.Segment; import org.elasticsearch.index.engine.SegmentsStats; import org.elasticsearch.index.fielddata.FieldDataStats; import org.elasticsearch.index.fielddata.IndexFieldData; @@ -1867,7 +1869,7 @@ public class IndexShardTests extends IndexShardTestCase { closeShards(shard); IndexShard newShard = newShard( ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE), - shard.shardPath(), shard.indexSettings().getIndexMetaData(), wrapper, null, () -> {}); + shard.shardPath(), shard.indexSettings().getIndexMetaData(), wrapper, null, () -> {}, EMPTY_EVENT_LISTENER); recoverShardFromStore(newShard); @@ -2013,7 +2015,7 @@ public class IndexShardTests extends IndexShardTestCase { closeShards(shard); IndexShard newShard = newShard( ShardRoutingHelper.initWithSameId(shard.routingEntry(), RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE), - shard.shardPath(), shard.indexSettings().getIndexMetaData(), wrapper, null, () -> {}); + shard.shardPath(), shard.indexSettings().getIndexMetaData(), wrapper, null, () -> {}, EMPTY_EVENT_LISTENER); recoverShardFromStore(newShard); @@ -2496,7 +2498,7 @@ public class IndexShardTests extends IndexShardTestCase { .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), randomFrom("false", "true", "checksum", "fix"))) .build(); final IndexShard newShard = newShard(shardRouting, indexShard.shardPath(), indexMetaData, - null, indexShard.engineFactory, indexShard.getGlobalCheckpointSyncer()); + null, indexShard.engineFactory, indexShard.getGlobalCheckpointSyncer(), EMPTY_EVENT_LISTENER); Store.MetadataSnapshot storeFileMetaDatas = newShard.snapshotStoreMetadata(); assertTrue("at least 2 files, commit and data: " + storeFileMetaDatas.toString(), storeFileMetaDatas.size() > 1); @@ -2980,4 +2982,74 @@ public class IndexShardTests extends IndexShardTestCase { breaker = primary.circuitBreakerService.getBreaker(CircuitBreaker.ACCOUNTING); assertThat(breaker.getUsed(), equalTo(0L)); } + + public void testFlushOnInactive() throws Exception { + Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + IndexMetaData metaData = IndexMetaData.builder("test") + .putMapping("test", "{ \"properties\": { \"foo\": { \"type\": \"text\"}}}") + .settings(settings) + .primaryTerm(0, 1).build(); + ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(metaData.getIndex(), 0), "n1", true, ShardRoutingState + .INITIALIZING, RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE); + final ShardId shardId = shardRouting.shardId(); + final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir()); + ShardPath shardPath = new ShardPath(false, 
nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
+        AtomicBoolean markedInactive = new AtomicBoolean();
+        AtomicReference<IndexShard> primaryRef = new AtomicReference<>();
+        IndexShard primary = newShard(shardRouting, shardPath, metaData, null, null, () -> {
+        }, new IndexEventListener() {
+            @Override
+            public void onShardInactive(IndexShard indexShard) {
+                markedInactive.set(true);
+                primaryRef.get().flush(new FlushRequest());
+            }
+        });
+        primaryRef.set(primary);
+        recoverShardFromStore(primary);
+        for (int i = 0; i < 3; i++) {
+            indexDoc(primary, "test", "" + i, "{\"foo\" : \"" + randomAlphaOfLength(10) + "\"}");
+            primary.refresh("test"); // produce segments
+        }
+        List<Segment> segments = primary.segments(false);
+        Set<String> names = new HashSet<>();
+        for (Segment segment : segments) {
+            assertFalse(segment.committed);
+            assertTrue(segment.search);
+            names.add(segment.getName());
+        }
+        assertEquals(3, segments.size());
+        primary.flush(new FlushRequest());
+        primary.forceMerge(new ForceMergeRequest().maxNumSegments(1).flush(false));
+        primary.refresh("test");
+        segments = primary.segments(false);
+        for (Segment segment : segments) {
+            if (names.contains(segment.getName())) {
+                assertTrue(segment.committed);
+                assertFalse(segment.search);
+            } else {
+                assertFalse(segment.committed);
+                assertTrue(segment.search);
+            }
+        }
+        assertEquals(4, segments.size());
+
+        assertFalse(markedInactive.get());
+        assertBusy(() -> {
+            primary.checkIdle(0);
+            assertFalse(primary.isActive());
+        });
+
+        assertTrue(markedInactive.get());
+        segments = primary.segments(false);
+        assertEquals(1, segments.size());
+        for (Segment segment : segments) {
+            assertTrue(segment.committed);
+            assertTrue(segment.search);
+        }
+        closeShards(primary);
+    }
 }
diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java
index 4c079b545c2..4830d48df79 100644
--- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java
+++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryRestoreTests.java
@@ -97,7 +97,8 @@ public class BlobStoreRepositoryRestoreTests extends IndexShardTestCase {
 
         // build a new shard using the same store directory as the closed shard
         ShardRouting shardRouting = ShardRoutingHelper.initWithSameId(shard.routingEntry(), EXISTING_STORE_INSTANCE);
-        shard = newShard(shardRouting, shard.shardPath(), shard.indexSettings().getIndexMetaData(), null, null, () -> {});
+        shard = newShard(shardRouting, shard.shardPath(), shard.indexSettings().getIndexMetaData(), null, null, () -> {},
+            EMPTY_EVENT_LISTENER);
 
         // restore the shard
         recoverShardFromSnapshot(shard, snapshot, repository);
diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java
index eb23ac3a80d..0d535d9af38 100644
--- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java
@@ -105,6 +105,8 @@ import static org.hamcrest.Matchers.hasSize;
  */
 public abstract class IndexShardTestCase extends ESTestCase {
 
+    public static final IndexEventListener EMPTY_EVENT_LISTENER = new IndexEventListener() {};
+
     protected static final PeerRecoveryTargetService.RecoveryListener recoveryListener = new
PeerRecoveryTargetService.RecoveryListener() {
         @Override
         public void onRecoveryDone(RecoveryState state) {
@@ -260,24 +262,25 @@ public abstract class IndexShardTestCase extends ESTestCase {
         final ShardId shardId = routing.shardId();
         final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir());
         ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
-        return newShard(routing, shardPath, indexMetaData, indexSearcherWrapper, engineFactory, globalCheckpointSyncer, listeners);
+        return newShard(routing, shardPath, indexMetaData, indexSearcherWrapper, engineFactory, globalCheckpointSyncer,
+            EMPTY_EVENT_LISTENER, listeners);
     }
 
     /**
      * creates a new initializing shard.
-     *
-     * @param routing                shard routing to use
+     * @param routing                shard routing to use
      * @param shardPath              path to use for shard data
      * @param indexMetaData          indexMetaData for the shard, including any mapping
      * @param indexSearcherWrapper   an optional wrapper to be used during searchers
      * @param globalCheckpointSyncer callback for syncing global checkpoints
+     * @param indexEventListener     the index event listener to register with the shard
      * @param listeners              an optional set of listeners to add to the shard
      */
     protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMetaData indexMetaData,
                                   @Nullable IndexSearcherWrapper indexSearcherWrapper,
                                   @Nullable EngineFactory engineFactory,
                                   Runnable globalCheckpointSyncer,
-                                  IndexingOperationListener... listeners) throws IOException {
+                                  IndexEventListener indexEventListener, IndexingOperationListener... listeners) throws IOException {
         final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build();
         final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings);
         final IndexShard indexShard;
@@ -289,8 +292,6 @@ public abstract class IndexShardTestCase extends ESTestCase {
             indexSettings.getSettings(), "index");
         mapperService.merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY);
         SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap());
-        final IndexEventListener indexEventListener = new IndexEventListener() {
-        };
         final Engine.Warmer warmer = searcher -> {
         };
         ClusterSettings clusterSettings = new ClusterSettings(nodeSettings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
@@ -335,7 +336,7 @@ public abstract class IndexShardTestCase extends ESTestCase {
                 null,
                 current.engineFactory,
                 current.getGlobalCheckpointSyncer(),
-                listeners);
+                EMPTY_EVENT_LISTENER, listeners);
     }
 
     /**

From 694e2a997003c3323dfdd79c1465c3525c79df8c Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Fri, 13 Apr 2018 15:23:44 +0200
Subject: [PATCH 14/14] Add remote cluster client (#29495)

This change adds a client that is connected to a remote cluster. This allows
plugins and internal structures to invoke actions on remote clusters just as
if it were a local cluster. The remote cluster must be configured via the
cross cluster search infrastructure.
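A remote client is obtained from the local client and then used like any
other client; for illustration (sketch only, "remote1" stands for a cluster
alias registered via the remote cluster settings):

    Client remoteClient = client.getRemoteClusterClient("remote1");
    // the request is sent over the already established remote connection
    ClusterStateResponse state = remoteClient.admin().cluster().prepareState().get();

Requesting a client for an unknown alias fails with an IllegalArgumentException.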
--- .../java/org/elasticsearch/client/Client.java | 10 ++ .../elasticsearch/client/FilterClient.java | 5 + .../elasticsearch/client/node/NodeClient.java | 11 ++- .../java/org/elasticsearch/node/Node.java | 2 +- .../transport/RemoteClusterAwareClient.java | 67 +++++++++++++ .../transport/RemoteClusterService.java | 16 ++++ .../client/node/NodeClientHeadersTests.java | 2 +- .../transport/RemoteClusterClientTests.java | 94 +++++++++++++++++++ 8 files changed, 204 insertions(+), 3 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java create mode 100644 server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java diff --git a/server/src/main/java/org/elasticsearch/client/Client.java b/server/src/main/java/org/elasticsearch/client/Client.java index 2c61653f61c..adb2f509b99 100644 --- a/server/src/main/java/org/elasticsearch/client/Client.java +++ b/server/src/main/java/org/elasticsearch/client/Client.java @@ -477,4 +477,14 @@ public interface Client extends ElasticsearchClient, Releasable { * issued from it. */ Client filterWithHeader(Map headers); + + /** + * Returns a client to a remote cluster with the given cluster alias. + * + * @throws IllegalArgumentException if the given clusterAlias doesn't exist + * @throws UnsupportedOperationException if this functionality is not available on this client. + */ + default Client getRemoteClusterClient(String clusterAlias) { + throw new UnsupportedOperationException("this client doesn't support remote cluster connections"); + } } diff --git a/server/src/main/java/org/elasticsearch/client/FilterClient.java b/server/src/main/java/org/elasticsearch/client/FilterClient.java index 23d3c2c3d0c..92f6817b74b 100644 --- a/server/src/main/java/org/elasticsearch/client/FilterClient.java +++ b/server/src/main/java/org/elasticsearch/client/FilterClient.java @@ -73,4 +73,9 @@ public abstract class FilterClient extends AbstractClient { protected Client in() { return in; } + + @Override + public Client getRemoteClusterClient(String clusterAlias) { + return in.getRemoteClusterClient(clusterAlias); + } } diff --git a/server/src/main/java/org/elasticsearch/client/node/NodeClient.java b/server/src/main/java/org/elasticsearch/client/node/NodeClient.java index e4f26b15702..69bf5d21f7a 100644 --- a/server/src/main/java/org/elasticsearch/client/node/NodeClient.java +++ b/server/src/main/java/org/elasticsearch/client/node/NodeClient.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskListener; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.RemoteClusterService; import java.util.Map; import java.util.function.Supplier; @@ -48,14 +49,17 @@ public class NodeClient extends AbstractClient { * {@link #executeLocally(GenericAction, ActionRequest, TaskListener)}. 
*/
     private Supplier<String> localNodeId;
+    private RemoteClusterService remoteClusterService;
 
     public NodeClient(Settings settings, ThreadPool threadPool) {
         super(settings, threadPool);
     }
 
-    public void initialize(Map<GenericAction, TransportAction> actions, Supplier<String> localNodeId) {
+    public void initialize(Map<GenericAction, TransportAction> actions, Supplier<String> localNodeId,
+                           RemoteClusterService remoteClusterService) {
         this.actions = actions;
         this.localNodeId = localNodeId;
+        this.remoteClusterService = remoteClusterService;
     }
 
     @Override
@@ -117,4 +121,9 @@ public class NodeClient extends AbstractClient {
         }
         return transportAction;
     }
+
+    @Override
+    public Client getRemoteClusterClient(String clusterAlias) {
+        return remoteClusterService.getRemoteClusterClient(threadPool(), clusterAlias);
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java
index 9fa886af8bc..b02e1614bbd 100644
--- a/server/src/main/java/org/elasticsearch/node/Node.java
+++ b/server/src/main/java/org/elasticsearch/node/Node.java
@@ -538,7 +538,7 @@ public class Node implements Closeable {
             resourcesToClose.addAll(pluginLifecycleComponents);
             this.pluginLifecycleComponents = Collections.unmodifiableList(pluginLifecycleComponents);
             client.initialize(injector.getInstance(new Key<Map<GenericAction, TransportAction>>() {}),
-                    () -> clusterService.localNode().getId());
+                    () -> clusterService.localNode().getId(), transportService.getRemoteClusterService());
 
             if (NetworkModule.HTTP_ENABLED.get(settings)) {
                 logger.debug("initializing HTTP handlers ...");
diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java
new file mode 100644
index 00000000000..aa476bf4dd2
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAwareClient.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.transport;
+
+import org.elasticsearch.action.Action;
+import org.elasticsearch.action.ActionListener;
+import org.elasticsearch.action.ActionListenerResponseHandler;
+import org.elasticsearch.action.ActionRequest;
+import org.elasticsearch.action.ActionRequestBuilder;
+import org.elasticsearch.action.ActionResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.support.AbstractClient;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.threadpool.ThreadPool;
+
+final class RemoteClusterAwareClient extends AbstractClient {
+
+    private final TransportService service;
+    private final String clusterAlias;
+    private final RemoteClusterService remoteClusterService;
+
+    RemoteClusterAwareClient(Settings settings, ThreadPool threadPool, TransportService service, String clusterAlias) {
+        super(settings, threadPool);
+        this.service = service;
+        this.clusterAlias = clusterAlias;
+        this.remoteClusterService = service.getRemoteClusterService();
+    }
+
+    @Override
+    protected <Request extends ActionRequest, Response extends ActionResponse,
+            RequestBuilder extends ActionRequestBuilder<Request, Response, RequestBuilder>>
+    void doExecute(Action<Request, Response, RequestBuilder> action, Request request, ActionListener<Response> listener) {
+        remoteClusterService.ensureConnected(clusterAlias, ActionListener.wrap(res -> {
+            Transport.Connection connection = remoteClusterService.getConnection(clusterAlias);
+            service.sendRequest(connection, action.name(), request, TransportRequestOptions.EMPTY,
+                new ActionListenerResponseHandler<>(listener, action::newResponse));
+        },
+        listener::onFailure));
+    }
+
+
+    @Override
+    public void close() {
+        // do nothing
+    }
+
+    @Override
+    public Client getRemoteClusterClient(String clusterAlias) {
+        return remoteClusterService.getRemoteClusterClient(threadPool(), clusterAlias);
+    }
+}
diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java
index b253d9d23df..f4545713017 100644
--- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java
+++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java
@@ -18,6 +18,7 @@
  */
 package org.elasticsearch.transport;
 
+import org.elasticsearch.client.Client;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
@@ -36,6 +37,7 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.TransportAddress;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.CountDown;
+import org.elasticsearch.threadpool.ThreadPool;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -398,4 +400,18 @@ public final class RemoteClusterService extends RemoteClusterAware implements Cl
             });
         }
     }
+
+    /**
+     * Returns a client to the remote cluster if the given cluster alias exists.
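+     * The returned client is a lightweight wrapper around the shared remote connection, so creating one per use is cheap.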
+ * @param threadPool the {@link ThreadPool} for the client + * @param clusterAlias the cluster alias the remote cluster is registered under + * + * @throws IllegalArgumentException if the given clusterAlias doesn't exist + */ + public Client getRemoteClusterClient(ThreadPool threadPool, String clusterAlias) { + if (transportService.getRemoteClusterService().getRemoteClusterNames().contains(clusterAlias) == false) { + throw new IllegalArgumentException("unknown cluster alias [" + clusterAlias + "]"); + } + return new RemoteClusterAwareClient(settings, threadPool, transportService, clusterAlias); + } } diff --git a/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java b/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java index bca04738d8b..5e739cc3250 100644 --- a/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java +++ b/server/src/test/java/org/elasticsearch/client/node/NodeClientHeadersTests.java @@ -43,7 +43,7 @@ public class NodeClientHeadersTests extends AbstractClientHeadersTestCase { Settings settings = HEADER_SETTINGS; Actions actions = new Actions(settings, threadPool, testedActions); NodeClient client = new NodeClient(settings, threadPool); - client.initialize(actions, () -> "test"); + client.initialize(actions, () -> "test", null); return client; } diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java new file mode 100644 index 00000000000..6008b7900a0 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterClientTests.java @@ -0,0 +1,94 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.transport; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; + +import java.util.Collections; +import java.util.concurrent.TimeUnit; + +import static org.elasticsearch.transport.RemoteClusterConnectionTests.startTransport; + +public class RemoteClusterClientTests extends ESTestCase { + private final ThreadPool threadPool = new TestThreadPool(getClass().getName()); + + @Override + public void tearDown() throws Exception { + super.tearDown(); + ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS); + } + + public void testConnectAndExecuteRequest() throws Exception { + Settings remoteSettings = Settings.builder().put(ClusterName.CLUSTER_NAME_SETTING.getKey(), "foo_bar_cluster").build(); + try (MockTransportService remoteTransport = startTransport("remote_node", Collections.emptyList(), Version.CURRENT, threadPool, + remoteSettings)) { + DiscoveryNode remoteNode = remoteTransport.getLocalDiscoNode(); + + Settings localSettings = Settings.builder() + .put(RemoteClusterService.ENABLE_REMOTE_CLUSTERS.getKey(), true) + .put("search.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()).build(); + try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + RemoteClusterService remoteClusterService = service.getRemoteClusterService(); + assertTrue(remoteClusterService.isRemoteNodeConnected("test", remoteNode)); + Client client = remoteClusterService.getRemoteClusterClient(threadPool, "test"); + ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().execute().get(); + assertNotNull(clusterStateResponse); + assertEquals("foo_bar_cluster", clusterStateResponse.getState().getClusterName().value()); + // also test a failure, there is no handler for search registered + ActionNotFoundTransportException ex = expectThrows(ActionNotFoundTransportException.class, + () -> client.prepareSearch().get()); + assertEquals("No handler for action [indices:data/read/search]", ex.getMessage()); + } + } + } + + public void testEnsureWeReconnect() throws Exception { + Settings remoteSettings = Settings.builder().put(ClusterName.CLUSTER_NAME_SETTING.getKey(), "foo_bar_cluster").build(); + try (MockTransportService remoteTransport = startTransport("remote_node", Collections.emptyList(), Version.CURRENT, threadPool, + remoteSettings)) { + DiscoveryNode remoteNode = remoteTransport.getLocalDiscoNode(); + Settings localSettings = Settings.builder() + .put(RemoteClusterService.ENABLE_REMOTE_CLUSTERS.getKey(), true) + .put("search.remote.test.seeds", remoteNode.getAddress().getAddress() + ":" + remoteNode.getAddress().getPort()).build(); + try (MockTransportService service = MockTransportService.createNewService(localSettings, Version.CURRENT, threadPool, null)) { + service.start(); + service.acceptIncomingRequests(); + service.disconnectFromNode(remoteNode); + RemoteClusterService remoteClusterService = service.getRemoteClusterService(); + 
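+                // wait until the disconnect is visible; the client request below must then re-establish the connection on its own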
assertBusy(() -> assertFalse(remoteClusterService.isRemoteNodeConnected("test", remoteNode))); + Client client = remoteClusterService.getRemoteClusterClient(threadPool, "test"); + ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().execute().get(); + assertNotNull(clusterStateResponse); + assertEquals("foo_bar_cluster", clusterStateResponse.getState().getClusterName().value()); + } + } + } + +}