From d2cd36bd9f96645a76a99eea2963dc758d1c32d5 Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Mon, 27 May 2019 10:38:03 +0200
Subject: [PATCH 01/40] Upgrade to Netty 4.1.36 (#42543) (#42564)

---
 buildSrc/version.properties                                     | 2 +-
 .../licenses/netty-buffer-4.1.35.Final.jar.sha1                 | 1 -
 .../licenses/netty-buffer-4.1.36.Final.jar.sha1                 | 1 +
 .../transport-netty4/licenses/netty-codec-4.1.35.Final.jar.sha1 | 1 -
 .../transport-netty4/licenses/netty-codec-4.1.36.Final.jar.sha1 | 1 +
 .../licenses/netty-codec-http-4.1.35.Final.jar.sha1             | 1 -
 .../licenses/netty-codec-http-4.1.36.Final.jar.sha1             | 1 +
 .../licenses/netty-common-4.1.35.Final.jar.sha1                 | 1 -
 .../licenses/netty-common-4.1.36.Final.jar.sha1                 | 1 +
 .../licenses/netty-handler-4.1.35.Final.jar.sha1                | 1 -
 .../licenses/netty-handler-4.1.36.Final.jar.sha1                | 1 +
 .../licenses/netty-resolver-4.1.35.Final.jar.sha1               | 1 -
 .../licenses/netty-resolver-4.1.36.Final.jar.sha1               | 1 +
 .../licenses/netty-transport-4.1.35.Final.jar.sha1              | 1 -
 .../licenses/netty-transport-4.1.36.Final.jar.sha1              | 1 +
 .../transport-nio/licenses/netty-buffer-4.1.35.Final.jar.sha1   | 1 -
 .../transport-nio/licenses/netty-buffer-4.1.36.Final.jar.sha1   | 1 +
 .../transport-nio/licenses/netty-codec-4.1.35.Final.jar.sha1    | 1 -
 .../transport-nio/licenses/netty-codec-4.1.36.Final.jar.sha1    | 1 +
 .../licenses/netty-codec-http-4.1.35.Final.jar.sha1             | 1 -
 .../licenses/netty-codec-http-4.1.36.Final.jar.sha1             | 1 +
 .../transport-nio/licenses/netty-common-4.1.35.Final.jar.sha1   | 1 -
 .../transport-nio/licenses/netty-common-4.1.36.Final.jar.sha1   | 1 +
 .../transport-nio/licenses/netty-handler-4.1.35.Final.jar.sha1  | 1 -
 .../transport-nio/licenses/netty-handler-4.1.36.Final.jar.sha1  | 1 +
 .../transport-nio/licenses/netty-resolver-4.1.35.Final.jar.sha1 | 1 -
 .../transport-nio/licenses/netty-resolver-4.1.36.Final.jar.sha1 | 1 +
 .../licenses/netty-transport-4.1.35.Final.jar.sha1              | 1 -
 .../licenses/netty-transport-4.1.36.Final.jar.sha1              | 1 +
 29 files changed, 15 insertions(+), 15 deletions(-)
 delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.35.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.36.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.35.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.36.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.35.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.36.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.35.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.36.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.35.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.36.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.35.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.36.Final.jar.sha1
 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.35.Final.jar.sha1
 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.36.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.35.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.36.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.35.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.36.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.35.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.36.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.35.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.36.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.35.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.36.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.35.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.36.Final.jar.sha1
 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.35.Final.jar.sha1
 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.36.Final.jar.sha1

diff --git a/buildSrc/version.properties b/buildSrc/version.properties
index 8a954885e3d..0d138fa3255 100644
--- a/buildSrc/version.properties
+++ b/buildSrc/version.properties
@@ -20,7 +20,7 @@ slf4j = 1.6.2
 
 # when updating the JNA version, also update the version in buildSrc/build.gradle
 jna = 4.5.1
-netty = 4.1.35.Final
+netty = 4.1.36.Final
 joda = 2.10.2
 
 # when updating this version, you need to ensure compatibility with:
diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.35.Final.jar.sha1
deleted file mode 100644
index 6112faf2d01..00000000000
--- a/modules/transport-netty4/licenses/netty-buffer-4.1.35.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a244722975cddaef5f9bbd45e7a44d0db5f058d8
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.36.Final.jar.sha1
new file mode 100644
index 00000000000..90895a5e168
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-buffer-4.1.36.Final.jar.sha1
@@ -0,0 +1 @@
+7f2db0921dd57df4db076229830ab09bba713aeb
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.35.Final.jar.sha1
deleted file mode 100644
index 811797decc1..00000000000
--- a/modules/transport-netty4/licenses/netty-codec-4.1.35.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b86f6b9eedbe38d6fa0bbbefa961d566e293e13e
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.36.Final.jar.sha1
new file mode 100644
index 00000000000..efd6e5a3277
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-4.1.36.Final.jar.sha1
@@ -0,0 +1 @@
+8462116d327bb3d1ec24258071f2e7345a73dbfc
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.35.Final.jar.sha1
deleted file mode 100644
index 3b0f1f7daa3..00000000000
--- a/modules/transport-netty4/licenses/netty-codec-http-4.1.35.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f7a38b0a3ee2fff3d9dd2bb44f5e16140b70b354
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.36.Final.jar.sha1
new file mode 100644
index 00000000000..4e86fef0e12
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.36.Final.jar.sha1
@@ -0,0 +1 @@
+62b73d439dbddf3c0dde092b048580139695ab46
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-common-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.35.Final.jar.sha1
deleted file mode 100644
index 26576f8e9cc..00000000000
--- a/modules/transport-netty4/licenses/netty-common-4.1.35.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c776487b782046e1399b00cd40c63ef51d26e953
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-common-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.36.Final.jar.sha1
new file mode 100644
index 00000000000..d9d50d776e9
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-common-4.1.36.Final.jar.sha1
@@ -0,0 +1 @@
+f6f38fde652a70ea579897edc80e52353e487ae6
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.35.Final.jar.sha1
deleted file mode 100644
index 0956313b2aa..00000000000
--- a/modules/transport-netty4/licenses/netty-handler-4.1.35.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b23efe31416942718ac46ad00bb3e91e4b3f6ab7
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.36.Final.jar.sha1
new file mode 100644
index 00000000000..d943140f363
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-handler-4.1.36.Final.jar.sha1
@@ -0,0 +1 @@
+1c38a5920a10c01b1cce4cdc964447ec76abf1b5
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.35.Final.jar.sha1
deleted file mode 100644
index cdd335d0591..00000000000
--- a/modules/transport-netty4/licenses/netty-resolver-4.1.35.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d60c4f4e12f0703dff477c9bf595f37a41ecacbc
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.36.Final.jar.sha1
new file mode 100644
index 00000000000..1499233b60d
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-resolver-4.1.36.Final.jar.sha1
@@ -0,0 +1 @@
+e4d243fbf4e6837fa294f892bf97149e18129100
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.35.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.35.Final.jar.sha1
deleted file mode 100644
index 8f52a39c4f7..00000000000
--- a/modules/transport-netty4/licenses/netty-transport-4.1.35.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-526b2646885c57adb54e2f89b2e2b80bebce3962
\ No newline at end of file
diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.36.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.36.Final.jar.sha1
new file mode 100644
index 00000000000..f36c1b17d74
--- /dev/null
+++ b/modules/transport-netty4/licenses/netty-transport-4.1.36.Final.jar.sha1
@@ -0,0 +1 @@
+8546e6be47be587acab86bbd106ca023678f07d9
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.35.Final.jar.sha1
deleted file mode 100644
index 6112faf2d01..00000000000
--- a/plugins/transport-nio/licenses/netty-buffer-4.1.35.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-a244722975cddaef5f9bbd45e7a44d0db5f058d8
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.36.Final.jar.sha1
new file mode 100644
index 00000000000..90895a5e168
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-buffer-4.1.36.Final.jar.sha1
@@ -0,0 +1 @@
+7f2db0921dd57df4db076229830ab09bba713aeb
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.35.Final.jar.sha1
deleted file mode 100644
index 811797decc1..00000000000
--- a/plugins/transport-nio/licenses/netty-codec-4.1.35.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b86f6b9eedbe38d6fa0bbbefa961d566e293e13e
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.36.Final.jar.sha1
new file mode 100644
index 00000000000..efd6e5a3277
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-codec-4.1.36.Final.jar.sha1
@@ -0,0 +1 @@
+8462116d327bb3d1ec24258071f2e7345a73dbfc
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.35.Final.jar.sha1
deleted file mode 100644
index 3b0f1f7daa3..00000000000
--- a/plugins/transport-nio/licenses/netty-codec-http-4.1.35.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-f7a38b0a3ee2fff3d9dd2bb44f5e16140b70b354
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.36.Final.jar.sha1
new file mode 100644
index 00000000000..4e86fef0e12
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.36.Final.jar.sha1
@@ -0,0 +1 @@
+62b73d439dbddf3c0dde092b048580139695ab46
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-common-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.35.Final.jar.sha1
deleted file mode 100644
index 26576f8e9cc..00000000000
--- a/plugins/transport-nio/licenses/netty-common-4.1.35.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-c776487b782046e1399b00cd40c63ef51d26e953
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-common-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.36.Final.jar.sha1
new file mode 100644
index 00000000000..d9d50d776e9
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-common-4.1.36.Final.jar.sha1
@@ -0,0 +1 @@
+f6f38fde652a70ea579897edc80e52353e487ae6
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.35.Final.jar.sha1
deleted file mode 100644
index 0956313b2aa..00000000000
--- a/plugins/transport-nio/licenses/netty-handler-4.1.35.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-b23efe31416942718ac46ad00bb3e91e4b3f6ab7
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.36.Final.jar.sha1
new file mode 100644
index 00000000000..d943140f363
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-handler-4.1.36.Final.jar.sha1
@@ -0,0 +1 @@
+1c38a5920a10c01b1cce4cdc964447ec76abf1b5
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.35.Final.jar.sha1
deleted file mode 100644
index cdd335d0591..00000000000
--- a/plugins/transport-nio/licenses/netty-resolver-4.1.35.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-d60c4f4e12f0703dff477c9bf595f37a41ecacbc
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.36.Final.jar.sha1
new file mode 100644
index 00000000000..1499233b60d
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-resolver-4.1.36.Final.jar.sha1
@@ -0,0 +1 @@
+e4d243fbf4e6837fa294f892bf97149e18129100
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.35.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.35.Final.jar.sha1
deleted file mode 100644
index 8f52a39c4f7..00000000000
--- a/plugins/transport-nio/licenses/netty-transport-4.1.35.Final.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-526b2646885c57adb54e2f89b2e2b80bebce3962
\ No newline at end of file
diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.36.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.36.Final.jar.sha1
new file mode 100644
index 00000000000..f36c1b17d74
--- /dev/null
+++ b/plugins/transport-nio/licenses/netty-transport-4.1.36.Final.jar.sha1
@@ -0,0 +1 @@
+8546e6be47be587acab86bbd106ca023678f07d9
\ No newline at end of file

From a5ca20a2506328f7068736cb7a447ef83ceb51e4 Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Mon, 27 May 2019 11:04:54 +0200
Subject: [PATCH 02/40] Some Cleanup in o.e.i.engine (#42278) (#42566)

* Some Cleanup in o.e.i.engine

* Remove dead code and parameters

* Reduce visibility in some obvious spots

* Add missing `assert`s (not that important here since the methods
  themselves will probably be dead-code eliminated) but still

---
 .../elasticsearch/index/engine/Engine.java    | 10 +--
 .../index/engine/InternalEngine.java          | 35 +++++-----
 .../index/engine/LiveVersionMap.java          |  2 +-
 .../index/engine/LuceneChangesSnapshot.java   |  3 +-
 .../index/engine/ReadOnlyEngine.java          |  4 +-
 .../index/engine/RecoveryCounter.java         | 65 ------------------
 .../RecoverySourcePruneMergePolicy.java       |  3 +-
 .../elasticsearch/index/engine/Segment.java   | 18 ++---
 .../index/engine/SegmentsStats.java           | 25 ++++---
 .../engine/SnapshotFailedEngineException.java |  7 +-
 .../index/engine/TranslogLeafReader.java      | 16 ++---
 .../VersionConflictEngineException.java       |  6 +-
 .../index/engine/FrozenEngine.java            |  2 +-
 13 files changed, 50 insertions(+), 146 deletions(-)
 delete mode 100644 server/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java

diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
index 63659126f84..2d210b716d4 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java
@@ -911,7 +911,7 @@ public abstract class Engine implements Closeable {
             map.put(extension, length);
         }
 
-        if (useCompoundFile && directory != null) {
+        if (useCompoundFile) {
             try {
                 directory.close();
             } catch (IOException e) {
@@ -954,8 +954,7 @@ public abstract class Engine implements Closeable {
 
         // now, correlate or add the committed ones...
         if (lastCommittedSegmentInfos != null) {
-            SegmentInfos infos = lastCommittedSegmentInfos;
-            for (SegmentCommitInfo info : infos) {
+            for (SegmentCommitInfo info : lastCommittedSegmentInfos) {
                 Segment segment = segments.get(info.info.name);
                 if (segment == null) {
                     segment = new Segment(info.info.name);
@@ -1783,11 +1782,8 @@ public abstract class Engine implements Closeable {
 
             CommitId commitId = (CommitId) o;
 
-            if (!Arrays.equals(id, commitId.id)) {
-                return false;
-            }
+            return Arrays.equals(id, commitId.id);
 
-            return true;
         }
 
         @Override
diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
index 9468237881f..9845c90a1db 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java
@@ -563,7 +563,7 @@ public class InternalEngine extends Engine {
     /**
      * Reads the current stored history ID from the IW commit data.
      */
-    private String loadHistoryUUID(final IndexWriter writer) throws IOException {
+    private String loadHistoryUUID(final IndexWriter writer) {
         final String uuid = commitDataAsMap(writer).get(HISTORY_UUID_KEY);
         if (uuid == null) {
             throw new IllegalStateException("commit doesn't contain history uuid");
@@ -635,9 +635,8 @@ public class InternalEngine extends Engine {
             if (operation != null) {
                 // in the case of a already pruned translog generation we might get null here - yet very unlikely
                 final Translog.Index index = (Translog.Index) operation;
-                TranslogLeafReader reader = new TranslogLeafReader(index, engineConfig
-                    .getIndexSettings().getIndexVersionCreated());
-                return new GetResult(new Searcher("realtime_get", new IndexSearcher(reader), reader::close),
+                TranslogLeafReader reader = new TranslogLeafReader(index);
+                return new GetResult(new Searcher("realtime_get", new IndexSearcher(reader), reader),
                     new VersionsAndSeqNoResolver.DocIdAndVersion(0, index.version(), index.seqNo(), index.primaryTerm(),
                         reader, 0));
             }
@@ -756,7 +755,7 @@ public class InternalEngine extends Engine {
                 + index.getAutoGeneratedIdTimestamp();
             switch (index.origin()) {
                 case PRIMARY:
-                    assertPrimaryCanOptimizeAddDocument(index);
+                    assert assertPrimaryCanOptimizeAddDocument(index);
                     return true;
                 case PEER_RECOVERY:
                 case REPLICA:
@@ -782,7 +781,7 @@
     private boolean assertIncomingSequenceNumber(final Engine.Operation.Origin origin, final long seqNo) {
         if (origin == Operation.Origin.PRIMARY) {
-            assertPrimaryIncomingSequenceNumber(origin, seqNo);
+            assert assertPrimaryIncomingSequenceNumber(origin, seqNo);
         } else {
             // sequence number should be set when operation origin is not primary
             assert seqNo >= 0 : "recovery or replica ops should have an assigned seq no.; origin: " + origin;
@@ -923,7 +922,7 @@
     }
 
     protected final IndexingStrategy planIndexingAsNonPrimary(Index index) throws IOException {
-        assertNonPrimaryOrigin(index);
+        assert assertNonPrimaryOrigin(index);
         final IndexingStrategy plan;
         final boolean appendOnlyRequest = canOptimizeAddDocument(index);
         if (appendOnlyRequest && mayHaveBeenIndexedBefore(index) == false && index.seqNo() > maxSeqNoOfNonAppendOnlyOperations.get()) {
@@ -978,13 +977,13 @@
         }
     }
 
-    protected final IndexingStrategy planIndexingAsPrimary(Index index) throws IOException {
+    private IndexingStrategy planIndexingAsPrimary(Index index) throws IOException {
        assert index.origin() == Operation.Origin.PRIMARY : "planing as primary but origin isn't. got " + index.origin();
        final IndexingStrategy plan;
        // resolve an external operation into an internal one which is safe to replay
        if (canOptimizeAddDocument(index)) {
            if (mayHaveBeenIndexedBefore(index)) {
-               plan = IndexingStrategy.overrideExistingAsIfNotThere(1L);
+               plan = IndexingStrategy.overrideExistingAsIfNotThere();
                versionMap.enforceSafeAccess();
            } else {
                plan = IndexingStrategy.optimizedAppendOnly(1L);
@@ -1006,7 +1005,7 @@
             if (index.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && versionValue == null) {
                 final VersionConflictEngineException e = new VersionConflictEngineException(shardId, index.id(),
                     index.getIfSeqNo(), index.getIfPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0);
-                plan = IndexingStrategy.skipDueToVersionConflict(e, currentNotFoundOrDeleted, currentVersion, getPrimaryTerm());
+                plan = IndexingStrategy.skipDueToVersionConflict(e, true, currentVersion, getPrimaryTerm());
             } else if (index.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && (
                 versionValue.seqNo != index.getIfSeqNo() || versionValue.term != index.getIfPrimaryTerm()
             )) {
@@ -1164,9 +1163,9 @@
                 true, false, versionForIndexing, null);
         }
 
-        static IndexingStrategy overrideExistingAsIfNotThere(long versionForIndexing) {
+        static IndexingStrategy overrideExistingAsIfNotThere() {
             return new IndexingStrategy(true, true, true,
-                false, versionForIndexing, null);
+                false, 1L, null);
         }
 
         public static IndexingStrategy processButSkipLucene(boolean currentNotFoundOrDeleted, long versionForIndexing) {
@@ -1285,7 +1284,7 @@
     }
 
     protected final DeletionStrategy planDeletionAsNonPrimary(Delete delete) throws IOException {
-        assertNonPrimaryOrigin(delete);
+        assert assertNonPrimaryOrigin(delete);
         maxSeqNoOfNonAppendOnlyOperations.updateAndGet(curr -> Math.max(delete.seqNo(), curr));
         assert maxSeqNoOfNonAppendOnlyOperations.get() >= delete.seqNo() : "max_seqno of non-append-only was not updated;"
             + "max_seqno non-append-only [" + maxSeqNoOfNonAppendOnlyOperations.get() + "], seqno of delete [" + delete.seqNo() + "]";
@@ -1305,7 +1304,7 @@
         } else {
             final OpVsLuceneDocStatus opVsLucene = compareOpToLuceneDocBasedOnSeqNo(delete);
             if (opVsLucene == OpVsLuceneDocStatus.OP_STALE_OR_EQUAL) {
-                plan = DeletionStrategy.processAsStaleOp(softDeleteEnabled, false, delete.version());
+                plan = DeletionStrategy.processAsStaleOp(softDeleteEnabled, delete.version());
             } else {
                 plan = DeletionStrategy.processNormally(opVsLucene == OpVsLuceneDocStatus.LUCENE_DOC_NOT_FOUND, delete.version());
             }
@@ -1318,7 +1317,7 @@
         return true;
     }
 
-    protected final DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException {
+    private DeletionStrategy planDeletionAsPrimary(Delete delete) throws IOException {
         assert delete.origin() == Operation.Origin.PRIMARY : "planing as primary but got " + delete.origin();
         // resolve operation from external to internal
         final VersionValue versionValue = resolveDocVersion(delete, delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO);
@@ -1336,7 +1335,7 @@
             if (delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && versionValue == null) {
                 final VersionConflictEngineException e = new VersionConflictEngineException(shardId, delete.id(), delete.getIfSeqNo(),
                     delete.getIfPrimaryTerm(), SequenceNumbers.UNASSIGNED_SEQ_NO, 0);
-                plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), currentlyDeleted);
+                plan = DeletionStrategy.skipDueToVersionConflict(e, currentVersion, getPrimaryTerm(), true);
             } else if (delete.getIfSeqNo() != SequenceNumbers.UNASSIGNED_SEQ_NO && (
                 versionValue.seqNo != delete.getIfSeqNo() || versionValue.term != delete.getIfPrimaryTerm()
             )) {
@@ -1428,8 +1427,8 @@
             return new DeletionStrategy(false, false, currentlyDeleted, versionOfDeletion, null);
         }
 
-        static DeletionStrategy processAsStaleOp(boolean addStaleOpToLucene, boolean currentlyDeleted, long versionOfDeletion) {
-            return new DeletionStrategy(false, addStaleOpToLucene, currentlyDeleted, versionOfDeletion, null);
+        static DeletionStrategy processAsStaleOp(boolean addStaleOpToLucene, long versionOfDeletion) {
+            return new DeletionStrategy(false, addStaleOpToLucene, false, versionOfDeletion, null);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java
index e4dce8919cf..ce955903af4 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java
@@ -234,7 +234,7 @@ final class LiveVersionMap implements ReferenceManager.RefreshListener, Accounta
     /**
      * Tracks bytes used by tombstones (deletes)
     */
-    final AtomicLong ramBytesUsedTombstones = new AtomicLong();
+    private final AtomicLong ramBytesUsedTombstones = new AtomicLong();
 
     @Override
     public void beforeRefresh() throws IOException {
diff --git a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java
index c9550a61f9e..a3e86ab1606 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/LuceneChangesSnapshot.java
@@ -188,8 +188,7 @@
         int readerIndex = 0;
         CombinedDocValues combinedDocValues = null;
         LeafReaderContext leaf = null;
-        for (int i = 0; i < scoreDocs.length; i++) {
-            ScoreDoc scoreDoc = scoreDocs[i];
+        for (ScoreDoc scoreDoc : scoreDocs) {
             if (scoreDoc.doc >= docBase + maxDoc) {
                 do {
                     leaf = leaves.get(readerIndex++);
diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
index b981bdb8a84..b11fcbbd2ac 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java
@@ -457,8 +457,8 @@ public class ReadOnlyEngine extends Engine {
 
     }
 
-    protected void processReaders(IndexReader reader, IndexReader previousReader) {
-        searcherFactory.processReaders(reader, previousReader);
+    protected void processReader(IndexReader reader) {
+        searcherFactory.processReaders(reader, null);
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java b/server/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java
deleted file mode 100644
index 31fddbedfb7..00000000000
--- a/server/src/main/java/org/elasticsearch/index/engine/RecoveryCounter.java
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.engine;
-
-import org.elasticsearch.common.lease.Releasable;
-import org.elasticsearch.index.store.Store;
-
-import java.util.concurrent.atomic.AtomicInteger;
-
-/**
- * RecoveryCounter keeps tracks of the number of ongoing recoveries for a
- * particular {@link Store}
- */
-public class RecoveryCounter implements Releasable {
-
-    private final Store store;
-
-    RecoveryCounter(Store store) {
-        this.store = store;
-    }
-
-    private final AtomicInteger onGoingRecoveries = new AtomicInteger();
-
-    void startRecovery() {
-        store.incRef();
-        onGoingRecoveries.incrementAndGet();
-    }
-
-    public int get() {
-        return onGoingRecoveries.get();
-    }
-
-    /**
-     * End the recovery counter by decrementing the store's ref and the ongoing recovery counter
-     * @return number of ongoing recoveries remaining
-     */
-    int endRecovery() {
-        store.decRef();
-        int left = onGoingRecoveries.decrementAndGet();
-        assert onGoingRecoveries.get() >= 0 : "ongoingRecoveries must be >= 0 but was: " + onGoingRecoveries.get();
-        return left;
-    }
-
-    @Override
-    public void close() {
-        endRecovery();
-    }
-}
diff --git a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java
index 42276f4ca21..a4221bf01f2 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/RecoverySourcePruneMergePolicy.java
@@ -58,8 +58,7 @@ final class RecoverySourcePruneMergePolicy extends OneMergeWrappingMergePolicy {
         });
     }
 
-    // pkg private for testing
-    static CodecReader wrapReader(String recoverySourceField, CodecReader reader, Supplier retainSourceQuerySupplier)
+    private static CodecReader wrapReader(String recoverySourceField, CodecReader reader, Supplier retainSourceQuerySupplier)
         throws IOException {
         NumericDocValues recoverySource = reader.getNumericDocValues(recoverySourceField);
         if (recoverySource == null || recoverySource.nextDoc() == DocIdSetIterator.NO_MORE_DOCS) {
diff --git a/server/src/main/java/org/elasticsearch/index/engine/Segment.java b/server/src/main/java/org/elasticsearch/index/engine/Segment.java
index fa15e7dc09e..f10f9a4b5fc 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/Segment.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/Segment.java
@@ -40,6 +40,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
 public class Segment implements Streamable {
 
@@ -94,10 +95,6 @@ public class Segment implements Streamable {
         return new ByteSizeValue(sizeInBytes);
     }
 
-    public long getSizeInBytes() {
-        return this.sizeInBytes;
-    }
-
     public org.apache.lucene.util.Version getVersion() {
         return version;
     }
@@ -145,9 +142,8 @@ public class Segment implements Streamable {
 
         Segment segment = (Segment) o;
 
-        if (name != null ? !name.equals(segment.name) : segment.name != null) return false;
+        return Objects.equals(name, segment.name);
 
-        return true;
     }
 
     @Override
@@ -220,7 +216,7 @@ public class Segment implements Streamable {
         }
     }
 
-    Sort readSegmentSort(StreamInput in) throws IOException {
+    private Sort readSegmentSort(StreamInput in) throws IOException {
         int size = in.readVInt();
         if (size == 0) {
             return null;
@@ -271,7 +267,7 @@ public class Segment implements Streamable {
         return new Sort(fields);
     }
 
-    void writeSegmentSort(StreamOutput out, Sort sort) throws IOException {
+    private void writeSegmentSort(StreamOutput out, Sort sort) throws IOException {
         if (sort == null) {
             out.writeVInt(0);
             return;
@@ -311,14 +307,14 @@ public class Segment implements Streamable {
         }
     }
 
-    Accountable readRamTree(StreamInput in) throws IOException {
+    private Accountable readRamTree(StreamInput in) throws IOException {
         final String name = in.readString();
         final long bytes = in.readVLong();
         int numChildren = in.readVInt();
         if (numChildren == 0) {
             return Accountables.namedAccountable(name, bytes);
         }
-        List children = new ArrayList(numChildren);
+        List children = new ArrayList<>(numChildren);
         while (numChildren-- > 0) {
             children.add(readRamTree(in));
         }
@@ -326,7 +322,7 @@ public class Segment implements Streamable {
     }
 
     // the ram tree is written recursively since the depth is fairly low (5 or 6)
-    void writeRamTree(StreamOutput out, Accountable tree) throws IOException {
+    private void writeRamTree(StreamOutput out, Accountable tree) throws IOException {
         out.writeString(tree.toString());
         out.writeVLong(tree.ramBytesUsed());
         Collection children = tree.getChildResources();
diff --git a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java
index 2d22a6f3caf..ae78de57453 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java
@@ -30,7 +30,6 @@ import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 
 import java.io.IOException;
-import java.util.Iterator;
 
 public class SegmentsStats implements Streamable, Writeable, ToXContentFragment {
 
@@ -54,7 +53,7 @@ public class SegmentsStats implements Streamable, Writeable, ToXContentFragment
      * Ideally this should be in sync to what the current version of Lucene is using, but it's harmless to leave extensions out,
      * they'll just miss a proper description in the stats
     */
-    private static ImmutableOpenMap fileDescriptions = ImmutableOpenMap.builder()
+    private static final ImmutableOpenMap FILE_DESCRIPTIONS = ImmutableOpenMap.builder()
         .fPut("si", "Segment Info")
         .fPut("fnm", "Fields")
         .fPut("fdx", "Field Index")
@@ -150,8 +149,7 @@ public class SegmentsStats implements Streamable, Writeable, ToXContentFragment
     public void addFileSizes(ImmutableOpenMap fileSizes) {
         ImmutableOpenMap.Builder map = ImmutableOpenMap.builder(this.fileSizes);
 
-        for (Iterator> it = fileSizes.iterator(); it.hasNext();) {
-            ObjectObjectCursor entry = it.next();
+        for (ObjectObjectCursor entry : fileSizes) {
             if (map.containsKey(entry.key)) {
                 Long oldValue = map.get(entry.key);
                 map.put(entry.key, oldValue + entry.value);
@@ -206,7 +204,7 @@ public class SegmentsStats implements Streamable, Writeable, ToXContentFragment
         return this.termsMemoryInBytes;
     }
 
-    public ByteSizeValue getTermsMemory() {
+    private ByteSizeValue getTermsMemory() {
         return new ByteSizeValue(termsMemoryInBytes);
     }
 
@@ -217,7 +215,7 @@
         return this.storedFieldsMemoryInBytes;
     }
 
-    public ByteSizeValue getStoredFieldsMemory() {
+    private ByteSizeValue getStoredFieldsMemory() {
         return new ByteSizeValue(storedFieldsMemoryInBytes);
     }
 
@@ -228,7 +226,7 @@
         return this.termVectorsMemoryInBytes;
     }
 
-    public ByteSizeValue getTermVectorsMemory() {
+    private ByteSizeValue getTermVectorsMemory() {
         return new ByteSizeValue(termVectorsMemoryInBytes);
     }
 
@@ -239,7 +237,7 @@
         return this.normsMemoryInBytes;
     }
 
-    public ByteSizeValue getNormsMemory() {
+    private ByteSizeValue getNormsMemory() {
         return new ByteSizeValue(normsMemoryInBytes);
     }
 
@@ -250,7 +248,7 @@
         return this.pointsMemoryInBytes;
     }
 
-    public ByteSizeValue getPointsMemory() {
+    private ByteSizeValue getPointsMemory() {
         return new ByteSizeValue(pointsMemoryInBytes);
     }
 
@@ -261,7 +259,7 @@
         return this.docValuesMemoryInBytes;
     }
 
-    public ByteSizeValue getDocValuesMemory() {
+    private ByteSizeValue getDocValuesMemory() {
         return new ByteSizeValue(docValuesMemoryInBytes);
     }
 
@@ -326,11 +324,10 @@
         builder.humanReadableField(Fields.FIXED_BIT_SET_MEMORY_IN_BYTES, Fields.FIXED_BIT_SET, getBitsetMemory());
         builder.field(Fields.MAX_UNSAFE_AUTO_ID_TIMESTAMP, maxUnsafeAutoIdTimestamp);
         builder.startObject(Fields.FILE_SIZES);
-        for (Iterator> it = fileSizes.iterator(); it.hasNext();) {
-            ObjectObjectCursor entry = it.next();
+        for (ObjectObjectCursor entry : fileSizes) {
             builder.startObject(entry.key);
             builder.humanReadableField(Fields.SIZE_IN_BYTES, Fields.SIZE, new ByteSizeValue(entry.value));
-            builder.field(Fields.DESCRIPTION, fileDescriptions.getOrDefault(entry.key, "Others"));
+            builder.field(Fields.DESCRIPTION, FILE_DESCRIPTIONS.getOrDefault(entry.key, "Others"));
             builder.endObject();
         }
         builder.endObject();
@@ -391,7 +388,7 @@
         out.writeVInt(fileSizes.size());
         for (ObjectObjectCursor entry : fileSizes) {
             out.writeString(entry.key);
-            out.writeLong(entry.value.longValue());
+            out.writeLong(entry.value);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java b/server/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java
index f669139c07e..d858ccb0ab6 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/SnapshotFailedEngineException.java
@@ -20,17 +20,12 @@
 package org.elasticsearch.index.engine;
 
 import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.index.shard.ShardId;
 
 import java.io.IOException;
 
 public class SnapshotFailedEngineException extends EngineException {
 
-    public SnapshotFailedEngineException(ShardId shardId, Throwable cause) {
-        super(shardId, "Snapshot failed", cause);
-    }
-
     public SnapshotFailedEngineException(StreamInput in) throws IOException{
         super(in);
     }
-}
\ No newline at end of file
+}
diff --git a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java
index c7e11e85f7d..d40e7d04e3e 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/TranslogLeafReader.java
@@ -35,7 +35,6 @@ import org.apache.lucene.index.StoredFieldVisitor;
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.util.Bits;
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.Version;
 import org.elasticsearch.index.mapper.IdFieldMapper;
 import org.elasticsearch.index.mapper.RoutingFieldMapper;
 import org.elasticsearch.index.mapper.SourceFieldMapper;
@@ -61,11 +60,9 @@ final class TranslogLeafReader extends LeafReader {
     private static final FieldInfo FAKE_ID_FIELD = new FieldInfo(IdFieldMapper.NAME, 3, false, false, false, IndexOptions.NONE,
         DocValuesType.NONE, -1, Collections.emptyMap(), 0, 0, 0, false);
-    private final Version indexVersionCreated;
 
-    TranslogLeafReader(Translog.Index operation, Version indexVersionCreated) {
+    TranslogLeafReader(Translog.Index operation) {
         this.operation = operation;
-        this.indexVersionCreated = indexVersionCreated;
     }
 
     @Override
     public CacheHelper getCoreCacheHelper() {
@@ -161,14 +158,9 @@ final class TranslogLeafReader extends LeafReader {
             visitor.stringField(FAKE_ROUTING_FIELD, operation.routing().getBytes(StandardCharsets.UTF_8));
         }
         if (visitor.needsField(FAKE_ID_FIELD) == StoredFieldVisitor.Status.YES) {
-            final byte[] id;
-            if (indexVersionCreated.onOrAfter(Version.V_6_0_0)) {
-                BytesRef bytesRef = Uid.encodeId(operation.id());
-                id = new byte[bytesRef.length];
-                System.arraycopy(bytesRef.bytes, bytesRef.offset, id, 0, bytesRef.length);
-            } else { // TODO this can go away in 7.0 after backport
-                id = operation.id().getBytes(StandardCharsets.UTF_8);
-            }
+            BytesRef bytesRef = Uid.encodeId(operation.id());
+            final byte[] id = new byte[bytesRef.length];
+            System.arraycopy(bytesRef.bytes, bytesRef.offset, id, 0, bytesRef.length);
             visitor.stringField(FAKE_ID_FIELD, id);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java b/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java
index 0f6c217409c..c869e2bc386 100644
--- a/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java
+++ b/server/src/main/java/org/elasticsearch/index/engine/VersionConflictEngineException.java
@@ -42,11 +42,7 @@ public class VersionConflictEngineException extends EngineException {
     }
 
     public VersionConflictEngineException(ShardId shardId, String id, String explanation) {
-        this(shardId, null, id, explanation);
-    }
-
-    public VersionConflictEngineException(ShardId shardId, Throwable cause, String id, String explanation) {
-        this(shardId, "[{}]: version conflict, {}", cause, id, explanation);
+        this(shardId, "[{}]: version conflict, {}", null, id, explanation);
     }
 
     public VersionConflictEngineException(ShardId shardId, String msg, Throwable cause, Object... params) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java
index e9b57e316cc..50f1125b275 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/index/engine/FrozenEngine.java
@@ -169,7 +169,7 @@ public final class FrozenEngine extends ReadOnlyEngine {
                     listeners.beforeRefresh();
                 }
                 reader = DirectoryReader.open(engineConfig.getStore().directory());
-                processReaders(reader, null);
+                processReader(reader);
                 reader = lastOpenedReader = wrapReader(reader, Function.identity());
                 reader.getReaderCacheHelper().addClosedListener(this::onReaderClosed);
                 for (ReferenceManager.RefreshListener listeners : config().getInternalRefreshListener()) {

From 49767fc1e9b4d9fead55aac0c47df1b41565aca4 Mon Sep 17 00:00:00 2001
From: Armin Braun
Date: Mon, 27 May 2019 11:28:12 +0200
Subject: [PATCH 03/40] Some Cleanup in o.e.gateway Package (#42108) (#42568)

* Removing obvious dead code

* Removing redundant listener interface

---
 .../gateway/BaseGatewayShardAllocator.java    |  2 +-
 .../gateway/DanglingIndicesState.java         |  5 ++--
 .../org/elasticsearch/gateway/Gateway.java    |  6 +----
 .../gateway/GatewayException.java             |  8 ------
 .../gateway/GatewayMetaState.java             | 25 ++++-----------
 .../elasticsearch/gateway/GatewayService.java |  5 ++--
 .../gateway/LocalAllocateDangledIndices.java  | 13 ++-------
 .../gateway/MetaStateService.java             |  3 +--
 .../gateway/PrimaryShardAllocator.java        |  8 +++---
 .../gateway/PriorityComparator.java           |  4 +--
 .../gateway/ReplicaShardAllocator.java        | 14 +++++-----
 .../TransportNodesListGatewayMetaState.java   | 22 ---------------
 ...ransportNodesListGatewayStartedShards.java | 11 +++-----
 .../java/org/elasticsearch/node/Node.java     |  4 +--
 .../gateway/GatewayServiceTests.java          |  3 +-
 .../gateway/MockGatewayMetaState.java         |  6 ++---
 .../indices/IndicesServiceTests.java          | 27 +++++--------------
 17 files changed, 46 insertions(+), 120 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java
index 7e4172961ea..d8b96550ad0 100644
--- a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java
+++ b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java
@@ -93,7 +93,7 @@ public abstract class BaseGatewayShardAllocator {
      * Builds decisions for all nodes in the cluster, so that the explain API can provide information on
      * allocation decisions for each node, while still waiting to allocate the shard (e.g. due to fetching shard data).
      */
-    protected List buildDecisionsForAllNodes(ShardRouting shard, RoutingAllocation allocation) {
+    protected static List buildDecisionsForAllNodes(ShardRouting shard, RoutingAllocation allocation) {
         List results = new ArrayList<>();
         for (RoutingNode node : allocation.routingNodes()) {
             Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
diff --git a/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java b/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java
index 4d7949cdf4d..1b6beeddaca 100644
--- a/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java
+++ b/server/src/main/java/org/elasticsearch/gateway/DanglingIndicesState.java
@@ -22,6 +22,7 @@ package org.elasticsearch.gateway;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterStateListener;
 import org.elasticsearch.cluster.metadata.IndexGraveyard;
@@ -163,14 +164,14 @@ public class DanglingIndicesState implements ClusterStateListener {
         }
         try {
             allocateDangledIndices.allocateDangled(Collections.unmodifiableCollection(new ArrayList<>(danglingIndices.values())),
-                new LocalAllocateDangledIndices.Listener() {
+                new ActionListener() {
                     @Override
                     public void onResponse(LocalAllocateDangledIndices.AllocateDangledResponse response) {
                         logger.trace("allocated dangled");
                     }
 
                     @Override
-                    public void onFailure(Throwable e) {
+                    public void onFailure(Exception e) {
                         logger.info("failed to send allocated dangled", e);
                     }
                 }
diff --git a/server/src/main/java/org/elasticsearch/gateway/Gateway.java b/server/src/main/java/org/elasticsearch/gateway/Gateway.java
index 317bf63a4a6..53c067c1b0c 100644
--- a/server/src/main/java/org/elasticsearch/gateway/Gateway.java
+++ b/server/src/main/java/org/elasticsearch/gateway/Gateway.java
@@ -31,7 +31,6 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.discovery.zen.ElectMasterService;
 import org.elasticsearch.index.Index;
-import org.elasticsearch.indices.IndicesService;
 
 import java.util.Arrays;
 import java.util.function.Function;
@@ -45,12 +44,9 @@ public class Gateway {
     private final TransportNodesListGatewayMetaState listGatewayMetaState;
 
     private final int minimumMasterNodes;
-    private final IndicesService indicesService;
 
     public Gateway(final Settings settings, final ClusterService clusterService,
-                   final TransportNodesListGatewayMetaState listGatewayMetaState,
-                   final IndicesService indicesService) {
-        this.indicesService = indicesService;
+                   final TransportNodesListGatewayMetaState listGatewayMetaState) {
         this.clusterService = clusterService;
         this.listGatewayMetaState = listGatewayMetaState;
         this.minimumMasterNodes = ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings);
diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayException.java b/server/src/main/java/org/elasticsearch/gateway/GatewayException.java
index 32050f1c10e..380610a5936 100644
--- a/server/src/main/java/org/elasticsearch/gateway/GatewayException.java
+++ b/server/src/main/java/org/elasticsearch/gateway/GatewayException.java
@@ -26,14 +26,6 @@ import java.io.IOException;
 
 public class GatewayException extends ElasticsearchException {
 
-    public GatewayException(String msg) {
-        super(msg);
-    }
-
-    public GatewayException(String msg, Throwable cause) {
-        super(msg, cause);
-    }
-
     public GatewayException(StreamInput in) throws IOException {
         super(in);
     }
diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
index 30361fa70ee..91bcb68370e 100644
--- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
+++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java
@@ -44,9 +44,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.index.Index;
-import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.plugins.MetaDataUpgrader;
 import org.elasticsearch.transport.TransportService;
 
@@ -76,11 +74,9 @@ public class GatewayMetaState implements ClusterStateApplier, CoordinationState.
     protected static final Logger logger = LogManager.getLogger(GatewayMetaState.class);
 
-    private final NodeEnvironment nodeEnv;
     private final MetaStateService metaStateService;
     private final Settings settings;
     private final ClusterService clusterService;
-    private final IndicesService indicesService;
     private final TransportService transportService;
 
     //there is a single thread executing updateClusterState calls, hence no volatile modifier
@@ -88,16 +84,13 @@ public class GatewayMetaState implements ClusterStateApplier, CoordinationState.
     protected ClusterState previousClusterState;
     protected boolean incrementalWrite;
 
-    public GatewayMetaState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService,
+    public GatewayMetaState(Settings settings, MetaStateService metaStateService,
                             MetaDataIndexUpgradeService metaDataIndexUpgradeService, MetaDataUpgrader metaDataUpgrader,
-                            TransportService transportService, ClusterService clusterService,
-                            IndicesService indicesService) throws IOException {
+                            TransportService transportService, ClusterService clusterService) throws IOException {
         this.settings = settings;
-        this.nodeEnv = nodeEnv;
         this.metaStateService = metaStateService;
         this.transportService = transportService;
         this.clusterService = clusterService;
-        this.indicesService = indicesService;
 
         upgradeMetaData(metaDataIndexUpgradeService, metaDataUpgrader);
         initializeClusterState(ClusterName.CLUSTER_NAME_SETTING.get(settings));
@@ -184,7 +177,7 @@
         }
     }
 
-    protected boolean isMasterOrDataNode() {
+    private boolean isMasterOrDataNode() {
         return DiscoveryNode.isMasterNode(settings) || DiscoveryNode.isDataNode(settings);
     }
 
@@ -312,13 +305,12 @@
         }
     }
 
-        long writeManifestAndCleanup(String reason, Manifest manifest) throws WriteStateException {
+        void writeManifestAndCleanup(String reason, Manifest manifest) throws WriteStateException {
             assert finished == false : FINISHED_MSG;
             try {
-                long generation = metaStateService.writeManifestAndCleanup(reason, manifest);
+                metaStateService.writeManifestAndCleanup(reason, manifest);
                 commitCleanupActions.forEach(Runnable::run);
                 finished = true;
-                return generation;
             } catch (WriteStateException e) {
                 // if Manifest write results in dirty WriteStateException it's not safe to remove
                 // new metadata files, because if Manifest was actually written to disk and its deletion
@@ -346,7 +338,7 @@
     *
    * @throws WriteStateException if exception occurs. See also {@link WriteStateException#isDirty()}.
    */
-    protected void updateClusterState(ClusterState newState, ClusterState previousState)
+    private void updateClusterState(ClusterState newState, ClusterState previousState)
             throws WriteStateException {
         MetaData newMetaData = newState.metaData();
 
@@ -406,7 +398,7 @@
     }
 
     private static boolean isDataOnlyNode(ClusterState state) {
-        return ((state.nodes().getLocalNode().isMasterNode() == false) && state.nodes().getLocalNode().isDataNode());
+        return state.nodes().getLocalNode().isMasterNode() == false && state.nodes().getLocalNode().isDataNode();
     }
 
     /**
@@ -535,8 +527,7 @@
     }
 
     private static Set getRelevantIndicesForMasterEligibleNode(ClusterState state) {
-        Set relevantIndices;
-        relevantIndices = new HashSet<>();
+        Set relevantIndices = new HashSet<>();
         // we have to iterate over the metadata to make sure we also capture closed indices
         for (IndexMetaData indexMetaData : state.metaData()) {
             relevantIndices.add(indexMetaData.getIndex());
diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java
index a8960387e6f..85a9c448991 100644
--- a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java
+++ b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java
@@ -41,7 +41,6 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
 import org.elasticsearch.discovery.Discovery;
 import org.elasticsearch.discovery.zen.ZenDiscovery;
-import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.threadpool.ThreadPool;
 
@@ -94,7 +93,7 @@
     public GatewayService(final Settings settings, final AllocationService allocationService, final ClusterService clusterService,
                           final ThreadPool threadPool,
                           final TransportNodesListGatewayMetaState listGatewayMetaState,
-                          final IndicesService indicesService, final Discovery discovery) {
+                          final Discovery discovery) {
         this.allocationService = allocationService;
         this.clusterService = clusterService;
         this.threadPool = threadPool;
@@ -125,7 +124,7 @@
             recoveryRunnable = () ->
                     clusterService.submitStateUpdateTask("local-gateway-elected-state", new RecoverStateUpdateTask());
         } else {
-            final Gateway gateway = new Gateway(settings, clusterService, listGatewayMetaState, indicesService);
+            final Gateway gateway = new Gateway(settings, clusterService, listGatewayMetaState);
             recoveryRunnable = () ->
                     gateway.performStateRecovery(new GatewayRecoveryListener());
         }
diff --git a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java
index a5f4f77da43..b51d16dbc51 100644
--- a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java
+++ b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java
@@ -23,6 +23,7 @@ import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.Version;
+import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
 import org.elasticsearch.cluster.block.ClusterBlocks;
@@ -76,7 +77,7 @@
             new AllocateDangledRequestHandler());
     }
 
-    public void allocateDangled(Collection indices, final Listener listener) {
+    public void allocateDangled(Collection indices, ActionListener listener) {
         ClusterState clusterState = clusterService.state();
         DiscoveryNode masterNode = clusterState.nodes().getMasterNode();
         if (masterNode == null) {
@@ -110,12 +111,6 @@
         });
     }
 
-    public interface Listener {
-        void onResponse(AllocateDangledResponse response);
-
-        void onFailure(Throwable e);
-    }
-
     class AllocateDangledRequestHandler implements TransportRequestHandler {
         @Override
         public void messageReceived(final AllocateDangledRequest request, final TransportChannel channel, Task task) throws Exception {
@@ -257,10 +252,6 @@
             this.ack = ack;
         }
 
-        public boolean ack() {
-            return ack;
-        }
-
         @Override
         public void readFrom(StreamInput in) throws IOException {
             super.readFrom(in);
diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java
index 7bef4d5819e..db1a4fc77b9 100644
--- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java
+++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java
@@ -198,12 +198,11 @@ public class MetaStateService {
     *
    * @throws WriteStateException if exception when writing state occurs. See also {@link WriteStateException#isDirty()}
    */
-    public long writeManifestAndCleanup(String reason, Manifest manifest) throws WriteStateException {
+    public void writeManifestAndCleanup(String reason, Manifest manifest) throws WriteStateException {
         logger.trace("[_meta] writing state, reason [{}]", reason);
         try {
             long generation = MANIFEST_FORMAT.writeAndCleanup(manifest, nodeEnv.nodeDataPaths());
             logger.trace("[_meta] state written (generation: {})", generation);
-            return generation;
         } catch (WriteStateException ex) {
             throw new WriteStateException(ex.isDirty(), "[_meta]: failed to write meta state", ex);
         }
diff --git a/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java
index 79030336acc..d2e82d092e6 100644
--- a/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java
+++ b/server/src/main/java/org/elasticsearch/gateway/PrimaryShardAllocator.java
@@ -297,10 +297,10 @@ public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {
     /**
      * Split the list of node shard states into groups yes/no/throttle based on allocation deciders
     */
-    private NodesToAllocate buildNodesToAllocate(RoutingAllocation allocation,
-                                                 List nodeShardStates,
-                                                 ShardRouting shardRouting,
-                                                 boolean forceAllocate) {
+    private static NodesToAllocate buildNodesToAllocate(RoutingAllocation allocation,
+                                                        List nodeShardStates,
+                                                        ShardRouting shardRouting,
+                                                        boolean forceAllocate) {
         List yesNodeShards = new ArrayList<>();
         List throttledNodeShards = new ArrayList<>();
         List noNodeShards = new ArrayList<>();
diff --git a/server/src/main/java/org/elasticsearch/gateway/PriorityComparator.java b/server/src/main/java/org/elasticsearch/gateway/PriorityComparator.java
index 1d24baf561a..60bdc2434e9 100644
--- a/server/src/main/java/org/elasticsearch/gateway/PriorityComparator.java
+++ b/server/src/main/java/org/elasticsearch/gateway/PriorityComparator.java
@@ -56,11 +56,11 @@ public abstract class PriorityComparator implements Comparator {
         return cmp;
     }
 
-    private int priority(Settings settings) {
+    private static int priority(Settings settings) {
         return IndexMetaData.INDEX_PRIORITY_SETTING.get(settings);
     }
 
-    private long timeCreated(Settings settings) {
+    private static long timeCreated(Settings settings) {
         return settings.getAsLong(IndexMetaData.SETTING_CREATION_DATE, -1L);
     }
 
diff --git a/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java
index 10bd6115b4c..ce3cde3e6db 100644
--- a/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java
+++ b/server/src/main/java/org/elasticsearch/gateway/ReplicaShardAllocator.java
@@ -243,8 +243,8 @@ public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator {
      * YES or THROTTLE). If in explain mode, also returns the node-level explanations as the second element
      * in the returned tuple.
     */
-    private Tuple> canBeAllocatedToAtLeastOneNode(ShardRouting shard,
-                                                  RoutingAllocation allocation) {
+    private static Tuple> canBeAllocatedToAtLeastOneNode(ShardRouting shard,
+                                                         RoutingAllocation allocation) {
         Decision madeDecision = Decision.NO;
         final boolean explain = allocation.debugDecision();
         Map nodeDecisions = explain ? new HashMap<>() : null;
@@ -260,7 +260,7 @@
                 if (explain) {
                     madeDecision = decision;
                 } else {
-                    return Tuple.tuple(decision, nodeDecisions);
+                    return Tuple.tuple(decision, null);
                 }
             } else if (madeDecision.type() == Decision.Type.NO && decision.type() == Decision.Type.THROTTLE) {
                 madeDecision = decision;
@@ -276,8 +276,8 @@
      * Takes the store info for nodes that have a shard store and adds them to the node decisions,
      * leaving the node explanations untouched for those nodes that do not have any store information.
     */
-    private List augmentExplanationsWithStoreInfo(Map nodeDecisions,
-                                                  Map withShardStores) {
+    private static List augmentExplanationsWithStoreInfo(Map nodeDecisions,
+                                                         Map withShardStores) {
         if (nodeDecisions == null || withShardStores == null) {
             return null;
         }
@@ -295,8 +295,8 @@
     /**
      * Finds the store for the assigned shard in the fetched data, returns null if none is found.
     */
-    private TransportNodesListShardStoreMetaData.StoreFilesMetaData findStore(ShardRouting shard, RoutingAllocation allocation,
-                                                                              AsyncShardFetch.FetchResult data) {
+    private static TransportNodesListShardStoreMetaData.StoreFilesMetaData findStore(ShardRouting shard, RoutingAllocation allocation,
+                                                                                     AsyncShardFetch.FetchResult data) {
         assert shard.currentNodeId() != null;
         DiscoveryNode primaryNode = allocation.nodes().get(shard.currentNodeId());
         if (primaryNode == null) {
diff --git a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java
index 477961c8a6d..ab0fad88ecd 100644
--- a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java
+++ b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayMetaState.java
@@ -94,23 +94,10 @@ public class TransportNodesListGatewayMetaState extends TransportNodesAction {
-        NodesGatewayMetaState() {
-        }
-
         public NodesGatewayMetaState(ClusterName clusterName, List nodes, List failures) {
             super(clusterName, nodes, failures);
         }
@@ -135,15 +122,6 @@ public class TransportNodesListGatewayMetaState extends TransportNodesAction
Date: Mon, 27 May 2019 11:28:50 +0200
Subject: [PATCH 04/40] Cleanup Redundant BlobStoreFormat Class (#42195) (#42570)

* No need to have an abstract class here when there's only a single impl.

---
 .../blobstore/BlobStoreFormat.java            | 119 ------------------
 .../blobstore/BlobStoreRepository.java        |   3 -
 .../blobstore/ChecksumBlobStoreFormat.java    |  92 ++++++++++----
 3 files changed, 71 insertions(+), 143 deletions(-)
 delete mode 100644 server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java

diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java
deleted file mode 100644
index dc9f8092e3f..00000000000
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreFormat.java
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.repositories.blobstore; - -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.common.CheckedFunction; -import org.elasticsearch.common.blobstore.BlobContainer; -import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.common.xcontent.ToXContent; -import org.elasticsearch.common.xcontent.XContentHelper; -import org.elasticsearch.common.xcontent.XContentParser; -import org.elasticsearch.snapshots.SnapshotInfo; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; - -/** - * Base class that handles serialization of various data structures during snapshot/restore operations. - */ -public abstract class BlobStoreFormat { - - protected final String blobNameFormat; - - protected final CheckedFunction reader; - - protected final NamedXContentRegistry namedXContentRegistry; - - // Serialization parameters to specify correct context for metadata serialization - protected static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; - - static { - Map snapshotOnlyParams = new HashMap<>(); - // when metadata is serialized certain elements of the metadata shouldn't be included into snapshot - // exclusion of these elements is done by setting MetaData.CONTEXT_MODE_PARAM to MetaData.CONTEXT_MODE_SNAPSHOT - snapshotOnlyParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_SNAPSHOT); - // serialize SnapshotInfo using the SNAPSHOT mode - snapshotOnlyParams.put(SnapshotInfo.CONTEXT_MODE_PARAM, SnapshotInfo.CONTEXT_MODE_SNAPSHOT); - SNAPSHOT_ONLY_FORMAT_PARAMS = new ToXContent.MapParams(snapshotOnlyParams); - } - - /** - * @param blobNameFormat format of the blobname in {@link String#format(Locale, String, Object...)} format - * @param reader the prototype object that can deserialize objects with type T - */ - protected BlobStoreFormat(String blobNameFormat, CheckedFunction reader, - NamedXContentRegistry namedXContentRegistry) { - this.reader = reader; - this.blobNameFormat = blobNameFormat; - this.namedXContentRegistry = namedXContentRegistry; - } - - /** - * Reads and parses the blob with given blob name. 
- * - * @param blobContainer blob container - * @param blobName blob name - * @return parsed blob object - */ - public abstract T readBlob(BlobContainer blobContainer, String blobName) throws IOException; - - /** - * Reads and parses the blob with given name, applying name translation using the {link #blobName} method - * - * @param blobContainer blob container - * @param name name to be translated into - * @return parsed blob object - */ - public T read(BlobContainer blobContainer, String name) throws IOException { - String blobName = blobName(name); - return readBlob(blobContainer, blobName); - } - - /** - * Deletes obj in the blob container - */ - public void delete(BlobContainer blobContainer, String name) throws IOException { - blobContainer.deleteBlob(blobName(name)); - } - - /** - * Checks obj in the blob container - */ - public boolean exists(BlobContainer blobContainer, String name) { - return blobContainer.blobExists(blobName(name)); - } - - public String blobName(String name) { - return String.format(Locale.ROOT, blobNameFormat, name); - } - - protected T read(BytesReference bytes) throws IOException { - try (XContentParser parser = XContentHelper - .createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, bytes)) { - T obj = reader.apply(parser); - return obj; - } - } - -} diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 998c4f2db52..22567f1313c 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -392,9 +392,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp if (repositoryData.getAllSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) { throw new InvalidSnapshotNameException(metadata.name(), snapshotId.getName(), "snapshot with the same name already exists"); } - if (snapshotFormat.exists(blobContainer(), snapshotId.getUUID())) { - throw new InvalidSnapshotNameException(metadata.name(), snapshotId.getName(), "snapshot with the same name already exists"); - } // Write Global MetaData globalMetaDataFormat.write(clusterMetaData, blobContainer(), snapshotId.getUUID()); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java index ca6ec74dc2c..16751399a18 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.OutputStreamIndexOutput; +import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.blobstore.BlobContainer; @@ -33,24 +34,43 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.store.ByteArrayIndexInput; import org.elasticsearch.common.lucene.store.IndexOutputOutputStream; +import 
org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.core.internal.io.Streams; import org.elasticsearch.gateway.CorruptStateException; +import org.elasticsearch.snapshots.SnapshotInfo; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; /** * Snapshot metadata file format used in v2.0 and above */ -public class ChecksumBlobStoreFormat extends BlobStoreFormat { +public final class ChecksumBlobStoreFormat { + + // Serialization parameters to specify correct context for metadata serialization + private static final ToXContent.Params SNAPSHOT_ONLY_FORMAT_PARAMS; + + static { + Map snapshotOnlyParams = new HashMap<>(); + // when metadata is serialized certain elements of the metadata shouldn't be included into snapshot + // exclusion of these elements is done by setting MetaData.CONTEXT_MODE_PARAM to MetaData.CONTEXT_MODE_SNAPSHOT + snapshotOnlyParams.put(MetaData.CONTEXT_MODE_PARAM, MetaData.CONTEXT_MODE_SNAPSHOT); + // serialize SnapshotInfo using the SNAPSHOT mode + snapshotOnlyParams.put(SnapshotInfo.CONTEXT_MODE_PARAM, SnapshotInfo.CONTEXT_MODE_SNAPSHOT); + SNAPSHOT_ONLY_FORMAT_PARAMS = new ToXContent.MapParams(snapshotOnlyParams); + } private static final XContentType DEFAULT_X_CONTENT_TYPE = XContentType.SMILE; @@ -59,12 +79,18 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm private static final int BUFFER_SIZE = 4096; - protected final XContentType xContentType; + private final XContentType xContentType; - protected final boolean compress; + private final boolean compress; private final String codec; + private final String blobNameFormat; + + private final CheckedFunction reader; + + private final NamedXContentRegistry namedXContentRegistry; + /** * @param codec codec name * @param blobNameFormat format of the blobname in {@link String#format} format @@ -74,7 +100,9 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm */ public ChecksumBlobStoreFormat(String codec, String blobNameFormat, CheckedFunction reader, NamedXContentRegistry namedXContentRegistry, boolean compress, XContentType xContentType) { - super(blobNameFormat, reader, namedXContentRegistry); + this.reader = reader; + this.blobNameFormat = blobNameFormat; + this.namedXContentRegistry = namedXContentRegistry; this.xContentType = xContentType; this.compress = compress; this.codec = codec; @@ -91,6 +119,29 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm this(codec, blobNameFormat, reader, namedXContentRegistry, compress, DEFAULT_X_CONTENT_TYPE); } + /** + * Reads and parses the blob with given name, applying name translation using the {link #blobName} method + * + * @param blobContainer blob container + * @param name name to be translated into + * @return parsed blob object + */ + public T read(BlobContainer blobContainer, String name) throws IOException { + String blobName = blobName(name); + return readBlob(blobContainer, blobName); + } + + /** + * Deletes obj in the blob container + */ + public void 
delete(BlobContainer blobContainer, String name) throws IOException { + blobContainer.deleteBlob(blobName(name)); + } + + public String blobName(String name) { + return String.format(Locale.ROOT, blobNameFormat, name); + } + /** * Reads blob with specified name without resolving the blobName using using {@link #blobName} method. * @@ -108,8 +159,10 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm CodecUtil.checkHeader(indexInput, codec, VERSION, VERSION); long filePointer = indexInput.getFilePointer(); long contentSize = indexInput.length() - CodecUtil.footerLength() - filePointer; - BytesReference bytesReference = new BytesArray(bytes, (int) filePointer, (int) contentSize); - return read(bytesReference); + try (XContentParser parser = XContentHelper.createParser(namedXContentRegistry, LoggingDeprecationHandler.INSTANCE, + new BytesArray(bytes, (int) filePointer, (int) contentSize))) { + return reader.apply(parser); + } } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) { // we trick this into a dedicated exception with the original stacktrace throw new CorruptStateException(ex); @@ -156,7 +209,17 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm } private void writeTo(final T obj, final String blobName, final CheckedConsumer consumer) throws IOException { - final BytesReference bytes = write(obj); + final BytesReference bytes; + try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { + if (compress) { + try (StreamOutput compressedStreamOutput = CompressorFactory.COMPRESSOR.streamOutput(bytesStreamOutput)) { + write(obj, compressedStreamOutput); + } + } else { + write(obj, bytesStreamOutput); + } + bytes = bytesStreamOutput.bytes(); + } try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { final String resourceDesc = "ChecksumBlobStoreFormat.writeBlob(blob=\"" + blobName + "\")"; try (OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(resourceDesc, blobName, outputStream, BUFFER_SIZE)) { @@ -176,20 +239,7 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm } } - protected BytesReference write(T obj) throws IOException { - try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { - if (compress) { - try (StreamOutput compressedStreamOutput = CompressorFactory.COMPRESSOR.streamOutput(bytesStreamOutput)) { - write(obj, compressedStreamOutput); - } - } else { - write(obj, bytesStreamOutput); - } - return bytesStreamOutput.bytes(); - } - } - - protected void write(T obj, StreamOutput streamOutput) throws IOException { + private void write(T obj, StreamOutput streamOutput) throws IOException { try (XContentBuilder builder = XContentFactory.contentBuilder(xContentType, streamOutput)) { builder.startObject(); obj.toXContent(builder, SNAPSHOT_ONLY_FORMAT_PARAMS); From 489616da6255c830f69aa36e6117011081db0503 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 27 May 2019 11:39:59 +0200 Subject: [PATCH 05/40] Fix testTracerLog Network Tests (#42286) (#42565) * Fix testTracerLog Network Tests * Start appender before using it like we do for e.g. 
the Netty leak detection appender to avoid interference from actions on the network threads that might still be dangling from previous tests in the same suite * Closes #41890 --- .../transport/AbstractSimpleTransportTestCase.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 0985054f5b6..0483192a37a 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -1047,10 +1047,9 @@ public abstract class AbstractSimpleTransportTestCase extends ESTestCase { .build()); MockLogAppender appender = new MockLogAppender(); - Loggers.addAppender(LogManager.getLogger("org.elasticsearch.transport.TransportService.tracer"), appender); try { appender.start(); - + Loggers.addAppender(LogManager.getLogger("org.elasticsearch.transport.TransportService.tracer"), appender); final String requestSent = ".*\\[internal:test].*sent to.*\\{TS_B}.*"; final MockLogAppender.LoggingExpectation requestSentExpectation = new MockLogAppender.PatternSeenEventExpectation( From b68358945f2d3818bf9d7fae80923520b857cde2 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 27 May 2019 11:44:36 +0200 Subject: [PATCH 06/40] Dump Stacktrace on Slow IO-Thread Operations (#42000) (#42572) * Dump Stacktrace on Slow IO-Thread Operations * Follow up to #39729 extending the functionality to actually dump the stack when the thread is blocked, not afterwards * Logging the stacktrace after the thread became unblocked is only of limited use because we don't know what happened in the slow callback from that (only whether we were blocked on a read, write, connect, etc.)
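* For orientation before the large diff below, a condensed, self-contained sketch of the watchdog pattern this change introduces; the class and method names here are simplified placeholders, and a plain JDK scheduler stands in for the Elasticsearch ThreadPool used by the real TransportThreadWatchdog. Each network thread registers itself before running a handler and unregisters afterwards, while a periodic task dumps the stack of any thread that has been registered longer than the warn threshold, i.e. while it is still blocked:

    import java.util.Arrays;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;
    import java.util.stream.Collectors;

    // Sketch only: illustrates the register/unregister + periodic stack-dump idea,
    // not the actual Elasticsearch implementation.
    final class BlockedThreadWatchdog {

        private static final long WARN_THRESHOLD_NANOS = TimeUnit.MILLISECONDS.toNanos(150);

        private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
        // thread -> start time (nanos) of the handler it is currently executing
        private final ConcurrentHashMap<Thread, Long> registry = new ConcurrentHashMap<>();

        BlockedThreadWatchdog() {
            // check periodically rather than only after the handler returns, so a
            // blocked thread is reported while it is still blocked
            scheduler.scheduleAtFixedRate(this::logLongRunningExecutions, 2, 2, TimeUnit.SECONDS);
        }

        void register() {
            registry.put(Thread.currentThread(), System.nanoTime());
        }

        void unregister() {
            registry.remove(Thread.currentThread());
        }

        private void logLongRunningExecutions() {
            for (Map.Entry<Thread, Long> entry : registry.entrySet()) {
                final long elapsed = System.nanoTime() - entry.getValue();
                if (elapsed > WARN_THRESHOLD_NANOS) {
                    final Thread thread = entry.getKey();
                    // the thread is still inside the slow handler, so its current
                    // stack trace points at the blocking call
                    System.err.printf("Slow execution on network thread [%s] [%d ms]:%n%s%n",
                        thread.getName(), TimeUnit.NANOSECONDS.toMillis(elapsed),
                        Arrays.stream(thread.getStackTrace()).map(Object::toString)
                            .collect(Collectors.joining(System.lineSeparator())));
                }
            }
        }

        void stop() {
            scheduler.shutdownNow();
        }
    }

* The real TransportThreadWatchdog in the diff below keeps the same ConcurrentHashMap registry but reschedules its check through the test ThreadPool and routes warnings through the transport logger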
* Relates #41745 --- .../transport/nio/MockNioTransport.java | 71 ++++++++++- .../transport/nio/TestEventHandler.java | 114 ++++++++++-------- .../transport/nio/TestEventHandlerTests.java | 13 +- 3 files changed, 143 insertions(+), 55 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index 39316ca9192..a261d68cbb3 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -31,6 +31,7 @@ import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.recycler.Recycler; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.PageCacheRecycler; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.nio.BytesChannelContext; @@ -57,11 +58,16 @@ import java.net.StandardSocketOptions; import java.nio.ByteBuffer; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; +import java.util.Arrays; import java.util.HashSet; +import java.util.Map; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; import java.util.function.Consumer; import java.util.function.IntFunction; +import java.util.stream.Collectors; import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory; @@ -70,6 +76,7 @@ public class MockNioTransport extends TcpTransport { private static final Logger logger = LogManager.getLogger(MockNioTransport.class); private final ConcurrentMap profileToChannelFactory = newConcurrentMap(); + private final TransportThreadWatchdog transportThreadWatchdog; private volatile NioSelectorGroup nioGroup; private volatile MockTcpChannelFactory clientChannelFactory; @@ -77,6 +84,7 @@ public class MockNioTransport extends TcpTransport { PageCacheRecycler pageCacheRecycler, NamedWriteableRegistry namedWriteableRegistry, CircuitBreakerService circuitBreakerService) { super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); + this.transportThreadWatchdog = new TransportThreadWatchdog(threadPool); } @Override @@ -96,7 +104,7 @@ public class MockNioTransport extends TcpTransport { boolean success = false; try { nioGroup = new NioSelectorGroup(daemonThreadFactory(this.settings, TcpTransport.TRANSPORT_WORKER_THREAD_NAME_PREFIX), 2, - (s) -> new TestEventHandler(this::onNonChannelException, s, System::nanoTime)); + (s) -> new TestEventHandler(this::onNonChannelException, s, transportThreadWatchdog)); ProfileSettings clientProfileSettings = new ProfileSettings(settings, "default"); clientChannelFactory = new MockTcpChannelFactory(true, clientProfileSettings, "client"); @@ -125,6 +133,7 @@ public class MockNioTransport extends TcpTransport { @Override protected void stopInternal() { try { + transportThreadWatchdog.stop(); nioGroup.close(); } catch (Exception e) { logger.warn("unexpected exception while stopping nio group", e); @@ -319,4 +328,64 @@ public class MockNioTransport extends TcpTransport { 
getContext().sendMessage(BytesReference.toByteBuffers(reference), ActionListener.toBiConsumer(listener)); } } + + static final class TransportThreadWatchdog { + + private static final long WARN_THRESHOLD = TimeUnit.MILLISECONDS.toNanos(150); + + // Only check every 2s to not flood the logs on a blocked thread. + // We mostly care about long blocks and not random slowness anyway and in tests would randomly catch slow operations that block for + // less than 2s eventually. + private static final TimeValue CHECK_INTERVAL = TimeValue.timeValueSeconds(2); + + private final ThreadPool threadPool; + private final ConcurrentHashMap registry = new ConcurrentHashMap<>(); + + private volatile boolean stopped; + + TransportThreadWatchdog(ThreadPool threadPool) { + this.threadPool = threadPool; + threadPool.schedule(this::logLongRunningExecutions, CHECK_INTERVAL, ThreadPool.Names.GENERIC); + } + + public boolean register() { + Long previousValue = registry.put(Thread.currentThread(), threadPool.relativeTimeInNanos()); + return previousValue == null; + } + + public void unregister() { + Long previousValue = registry.remove(Thread.currentThread()); + assert previousValue != null; + maybeLogElapsedTime(previousValue); + } + + private void maybeLogElapsedTime(long startTime) { + long elapsedTime = threadPool.relativeTimeInNanos() - startTime; + if (elapsedTime > WARN_THRESHOLD) { + logger.warn( + new ParameterizedMessage("Slow execution on network thread [{} milliseconds]", + TimeUnit.NANOSECONDS.toMillis(elapsedTime)), + new RuntimeException("Slow exception on network thread")); + } + } + + private void logLongRunningExecutions() { + for (Map.Entry entry : registry.entrySet()) { + final long elapsedTime = threadPool.relativeTimeInMillis() - entry.getValue(); + if (elapsedTime > WARN_THRESHOLD) { + final Thread thread = entry.getKey(); + logger.warn("Slow execution on network thread [{}] [{} milliseconds]: \n{}", thread.getName(), + TimeUnit.NANOSECONDS.toMillis(elapsedTime), + Arrays.stream(thread.getStackTrace()).map(Object::toString).collect(Collectors.joining("\n"))); + } + } + if (stopped == false) { + threadPool.scheduleUnlessShuttingDown(CHECK_INTERVAL, ThreadPool.Names.GENERIC, this::logLongRunningExecutions); + } + } + + public void stop() { + stopped = true; + } + } } diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java index a70ecb0c59e..069e19c3455 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/TestEventHandler.java @@ -19,9 +19,6 @@ package org.elasticsearch.transport.nio; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.nio.ChannelContext; import org.elasticsearch.nio.EventHandler; import org.elasticsearch.nio.NioSelector; @@ -32,185 +29,202 @@ import java.io.IOException; import java.util.Collections; import java.util.Set; import java.util.WeakHashMap; -import java.util.concurrent.TimeUnit; import java.util.function.Consumer; -import java.util.function.LongSupplier; import java.util.function.Supplier; public class TestEventHandler extends EventHandler { - private static final Logger logger = LogManager.getLogger(TestEventHandler.class); - private final Set hasConnectedMap = Collections.newSetFromMap(new WeakHashMap<>()); private 
final Set hasConnectExceptionMap = Collections.newSetFromMap(new WeakHashMap<>()); - private final LongSupplier relativeNanosSupplier; + private final MockNioTransport.TransportThreadWatchdog transportThreadWatchdog; - TestEventHandler(Consumer exceptionHandler, Supplier selectorSupplier, LongSupplier relativeNanosSupplier) { + TestEventHandler(Consumer exceptionHandler, Supplier selectorSupplier, + MockNioTransport.TransportThreadWatchdog transportThreadWatchdog) { super(exceptionHandler, selectorSupplier); - this.relativeNanosSupplier = relativeNanosSupplier; + this.transportThreadWatchdog = transportThreadWatchdog; } @Override protected void acceptChannel(ServerChannelContext context) throws IOException { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.acceptChannel(context); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void acceptException(ServerChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.acceptException(context, exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void handleRegistration(ChannelContext context) throws IOException { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleRegistration(context); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void registrationException(ChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.registrationException(context, exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } public void handleConnect(SocketChannelContext context) throws IOException { assert hasConnectedMap.contains(context) == false : "handleConnect should only be called is a channel is not yet connected"; - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleConnect(context); if (context.isConnectComplete()) { hasConnectedMap.add(context); } } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } public void connectException(SocketChannelContext context, Exception e) { assert hasConnectExceptionMap.contains(context) == false : "connectException should only called at maximum once per channel"; + final boolean registered = transportThreadWatchdog.register(); hasConnectExceptionMap.add(context); - long startTime = relativeNanosSupplier.getAsLong(); try { super.connectException(context, e); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void handleRead(SocketChannelContext context) throws IOException { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleRead(context); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void 
readException(SocketChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.readException(context, exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void handleWrite(SocketChannelContext context) throws IOException { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleWrite(context); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void writeException(SocketChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.writeException(context, exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void handleTask(Runnable task) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleTask(task); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void taskException(Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.taskException(exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void handleClose(ChannelContext context) throws IOException { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.handleClose(context); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void closeException(ChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.closeException(context, exception); } finally { - maybeLogElapsedTime(startTime); + if (registered) { + transportThreadWatchdog.unregister(); + } } } @Override protected void genericChannelException(ChannelContext context, Exception exception) { - long startTime = relativeNanosSupplier.getAsLong(); + final boolean registered = transportThreadWatchdog.register(); try { super.genericChannelException(context, exception); } finally { - maybeLogElapsedTime(startTime); - } - } - - private static final long WARN_THRESHOLD = 150; - - private void maybeLogElapsedTime(long startTime) { - long elapsedTime = TimeUnit.NANOSECONDS.toMillis(relativeNanosSupplier.getAsLong() - startTime); - if (elapsedTime > WARN_THRESHOLD) { - logger.warn(new ParameterizedMessage("Slow execution on network thread [{} milliseconds]", elapsedTime), - new RuntimeException("Slow exception on network thread")); + if (registered) { + transportThreadWatchdog.unregister(); + } } } } diff --git a/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java b/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java index 2a570eb59b6..424d4922f02 100644 --- 
a/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java +++ b/test/framework/src/test/java/org/elasticsearch/transport/nio/TestEventHandlerTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.nio.ServerChannelContext; import org.elasticsearch.nio.SocketChannelContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.MockLogAppender; +import org.elasticsearch.threadpool.ThreadPool; import java.util.HashMap; import java.util.Map; @@ -34,6 +35,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.LongSupplier; +import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; public class TestEventHandlerTests extends ESTestCase { @@ -43,12 +45,12 @@ public class TestEventHandlerTests extends ESTestCase { public void setUp() throws Exception { super.setUp(); appender = new MockLogAppender(); - Loggers.addAppender(LogManager.getLogger(TestEventHandler.class), appender); + Loggers.addAppender(LogManager.getLogger(MockNioTransport.class), appender); appender.start(); } public void tearDown() throws Exception { - Loggers.removeAppender(LogManager.getLogger(TestEventHandler.class), appender); + Loggers.removeAppender(LogManager.getLogger(MockNioTransport.class), appender); appender.stop(); super.tearDown(); } @@ -65,7 +67,10 @@ public class TestEventHandlerTests extends ESTestCase { } throw new IllegalStateException("Cannot update isStart"); }; - TestEventHandler eventHandler = new TestEventHandler((e) -> {}, () -> null, timeSupplier); + final ThreadPool threadPool = mock(ThreadPool.class); + doAnswer(i -> timeSupplier.getAsLong()).when(threadPool).relativeTimeInNanos(); + TestEventHandler eventHandler = + new TestEventHandler((e) -> {}, () -> null, new MockNioTransport.TransportThreadWatchdog(threadPool)); ServerChannelContext serverChannelContext = mock(ServerChannelContext.class); SocketChannelContext socketChannelContext = mock(SocketChannelContext.class); @@ -91,7 +96,7 @@ public class TestEventHandlerTests extends ESTestCase { for (Map.Entry> entry : tests.entrySet()) { String message = "*Slow execution on network thread*"; MockLogAppender.LoggingExpectation slowExpectation = - new MockLogAppender.SeenEventExpectation(entry.getKey(), TestEventHandler.class.getCanonicalName(), Level.WARN, message); + new MockLogAppender.SeenEventExpectation(entry.getKey(), MockNioTransport.class.getCanonicalName(), Level.WARN, message); appender.addExpectation(slowExpectation); entry.getValue().run(); appender.assertAllExpectationsMatched(); From 7b4d1ac35252e13f4856d46399b94159a05c702c Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 27 May 2019 11:47:04 +0200 Subject: [PATCH 07/40] Remove Obsolete BwC Logic from BlobStoreRepository (#42193) (#42571) * Remove Obsolete BwC Logic from BlobStoreRepository * We can't restore 1.3.3 files anyway -> no point in doing the dance of computing a hash here * Some other minor+obvious cleanups --- .../blobstore/BlobStoreRepository.java | 43 +------------------ .../blobstore/FileRestoreContext.java | 40 +---------------- 2 files changed, 3 insertions(+), 80 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 22567f1313c..b71b472a253 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ 
b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -26,8 +26,6 @@ import org.apache.lucene.index.IndexCommit; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.RateLimiter; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefBuilder; import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; @@ -988,8 +986,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp final Map blobs, final String reason) { final String indexGeneration = Integer.toString(fileListGeneration); - final String currentIndexGen = indexShardSnapshotsFormat.blobName(indexGeneration); - final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(snapshots); try { // Delete temporary index files first, as we might otherwise fail in the next step creating the new index file if an earlier @@ -1032,7 +1028,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp snapshotId, shardId, orphanedBlobs), e); } } catch (IOException e) { - String message = "Failed to finalize " + reason + " with shard index [" + currentIndexGen + "]"; + String message = + "Failed to finalize " + reason + " with shard index [" + indexShardSnapshotsFormat.blobName(indexGeneration) + "]"; throw new IndexShardSnapshotFailedException(shardId, message, e); } } @@ -1169,16 +1166,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp List filesInfo = snapshots.findPhysicalIndexFiles(fileName); if (filesInfo != null) { for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) { - try { - // in 1.3.3 we added additional hashes for .si / segments_N files - // to ensure we don't double the space in the repo since old snapshots - // don't have this hash we try to read that hash from the blob store - // in a bwc compatible way. - maybeRecalculateMetadataHash(blobContainer, fileInfo, metadata); - } catch (Exception e) { - logger.warn(() -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]", - shardId, fileInfo.physicalName(), fileInfo.metadata()), e); - } if (fileInfo.isSame(md) && snapshotFileExistsInBlobs(fileInfo, blobs)) { // a commit point file with the same name, size and checksum was already copied to repository // we will reuse it for this snapshot @@ -1349,32 +1336,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp } } - /** - * This is a BWC layer to ensure we update the snapshots metadata with the corresponding hashes before we compare them. - * The new logic for StoreFileMetaData reads the entire {@code .si} and {@code segments.n} files to strengthen the - * comparison of the files on a per-segment / per-commit level. - */ - private static void maybeRecalculateMetadataHash(final BlobContainer blobContainer, final BlobStoreIndexShardSnapshot.FileInfo fileInfo, - Store.MetadataSnapshot snapshot) throws Exception { - final StoreFileMetaData metadata; - if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) { - if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) { - // we have a hash - check if our repo has a hash too otherwise we have - // to calculate it. - // we might have multiple parts even though the file is small... make sure we read all of it. 
- try (InputStream stream = new PartSliceStream(blobContainer, fileInfo)) { - BytesRefBuilder builder = new BytesRefBuilder(); - Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length()); - BytesRef hash = fileInfo.metadata().hash(); // reset the file infos metadata hash - assert hash.length == 0; - hash.bytes = builder.bytes(); - hash.offset = 0; - hash.length = builder.length(); - } - } - } - } - private static final class PartSliceStream extends SlicedInputStream { private final BlobContainer container; diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java index f78ddab9ee4..3abe4d7b507 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java @@ -27,8 +27,6 @@ import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; -import org.apache.lucene.util.BytesRef; -import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.index.shard.ShardId; @@ -127,17 +125,6 @@ public abstract class FileRestoreContext { final Map snapshotMetaData = new HashMap<>(); final Map fileInfos = new HashMap<>(); for (final BlobStoreIndexShardSnapshot.FileInfo fileInfo : snapshotFiles.indexFiles()) { - try { - // in 1.3.3 we added additional hashes for .si / segments_N files - // to ensure we don't double the space in the repo since old snapshots - // don't have this hash we try to read that hash from the blob store - // in a bwc compatible way. - maybeRecalculateMetadataHash(fileInfo, recoveryTargetMetadata); - } catch (Exception e) { - // if the index is broken we might not be able to read it - logger.warn(new ParameterizedMessage("[{}] Can't calculate hash from blog for file [{}] [{}]", shardId, - fileInfo.physicalName(), fileInfo.metadata()), e); - } snapshotMetaData.put(fileInfo.metadata().name(), fileInfo.metadata()); fileInfos.put(fileInfo.metadata().name(), fileInfo); } @@ -237,7 +224,7 @@ public abstract class FileRestoreContext { protected abstract InputStream fileInputStream(BlobStoreIndexShardSnapshot.FileInfo fileInfo); @SuppressWarnings("unchecked") - private Iterable concat(Store.RecoveryDiff diff) { + private static Iterable concat(Store.RecoveryDiff diff) { return Iterables.concat(diff.different, diff.missing); } @@ -276,29 +263,4 @@ public abstract class FileRestoreContext { } } - /** - * This is a BWC layer to ensure we update the snapshots metadata with the corresponding hashes before we compare them. - * The new logic for StoreFileMetaData reads the entire {@code .si} and {@code segments.n} files to strengthen the - * comparison of the files on a per-segment / per-commit level. - */ - private void maybeRecalculateMetadataHash(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, Store.MetadataSnapshot snapshot) - throws IOException { - final StoreFileMetaData metadata; - if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) { - if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) { - // we have a hash - check if our repo has a hash too otherwise we have - // to calculate it. 
- // we might have multiple parts even though the file is small... make sure we read all of it. - try (InputStream stream = fileInputStream(fileInfo)) { - BytesRefBuilder builder = new BytesRefBuilder(); - Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length()); - BytesRef hash = fileInfo.metadata().hash(); // reset the file infos metadata hash - assert hash.length == 0; - hash.bytes = builder.bytes(); - hash.offset = 0; - hash.length = builder.length(); - } - } - } - } } From 380f296631f49721ab85eac21946d3729087ac00 Mon Sep 17 00:00:00 2001 From: bellengao Date: Mon, 27 May 2019 17:47:46 +0800 Subject: [PATCH 08/40] Update script-fields.asciidoc (#42490) --- docs/reference/search/request/script-fields.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/search/request/script-fields.asciidoc b/docs/reference/search/request/script-fields.asciidoc index da5868ea7d6..1bd61e00481 100644 --- a/docs/reference/search/request/script-fields.asciidoc +++ b/docs/reference/search/request/script-fields.asciidoc @@ -33,7 +33,7 @@ GET /_search // CONSOLE // TEST[setup:sales] -Script fields can work on fields that are not stored (`my_field_name` in +Script fields can work on fields that are not stored (`price` in the above case), and allow to return custom values to be returned (the evaluated value of the script). From 381e100217d92f031a58f4052b0be81519c46984 Mon Sep 17 00:00:00 2001 From: Travis Steel Date: Mon, 27 May 2019 04:53:51 -0500 Subject: [PATCH 09/40] Fixed typo in docker.asciidoc (#42455) --- docs/reference/setup/install/docker.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 1fcc261d68e..9037a292168 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -332,7 +332,7 @@ data through a bind-mount: As a last resort, you can also force the container to mutate the ownership of any bind-mounts used for the <> through the -environment variable `TAKE_FILE_OWNERSHIP`. Inn this case, they will be owned by +environment variable `TAKE_FILE_OWNERSHIP`. In this case, they will be owned by uid:gid `1000:0` providing read/write access to the {es} process as required. -- From 5d3e381648e697e1403e92eefae03f843e2605d0 Mon Sep 17 00:00:00 2001 From: Ignacio Vera Date: Mon, 27 May 2019 12:05:17 +0200 Subject: [PATCH 10/40] mute test testClosedIndices (#42582) --- .../java/org/elasticsearch/upgrades/FullClusterRestartIT.java | 1 + 1 file changed, 1 insertion(+) diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 5c43f7e01ed..7ea5e0cdb35 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -1031,6 +1031,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { * This test creates an index in the old cluster and then closes it. When the cluster is fully restarted in a newer version, * it verifies that the index exists and is replicated if the old version supports replication. 
*/ + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/42581") public void testClosedIndices() throws Exception { if (isRunningAgainstOldCluster()) { createIndex(index, Settings.builder() From a96606d9624da206cc82b2f6db89763d617433d2 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 27 May 2019 12:08:20 +0200 Subject: [PATCH 11/40] Safer Wait for Snapshot Success in ClusterPrivilegeTests (#40943) (#42575) * Safer Wait for Snapshot Success in ClusterPrivilegeTests * The snapshot state returned by the API might become SUCCESS before it's fully removed from the cluster state. * We should fix this race in the transport API but it's not trivial and will be part of the incoming big round of refactoring the repository interaction, this added check fixes the test for now * closes #38030 --- .../elasticsearch/integration/ClusterPrivilegeTests.java | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java index 2ceb14a172f..384401edaf5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClusterPrivilegeTests.java @@ -6,12 +6,14 @@ package org.elasticsearch.integration; import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusResponse; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; import org.elasticsearch.client.Request; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.xpack.core.security.authc.support.Hasher; +import org.hamcrest.Matchers; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -139,7 +141,6 @@ public class ClusterPrivilegeTests extends AbstractPrivilegeTestCase { assertAccessIsDenied("user_d", "PUT", "/_cluster/settings", "{ \"transient\" : { \"search.default_search_timeout\": \"1m\" } }"); } - @AwaitsFix( bugUrl = "https://github.com/elastic/elasticsearch/issues/38030") public void testThatSnapshotAndRestore() throws Exception { String repoJson = Strings.toString(jsonBuilder().startObject().field("type", "fs").startObject("settings").field("location", repositoryLocation.toString()).endObject().endObject()); @@ -203,6 +204,11 @@ public class ClusterPrivilegeTests extends AbstractPrivilegeTestCase { assertBusy(() -> { SnapshotsStatusResponse response = client().admin().cluster().prepareSnapshotStatus(repo).setSnapshots(snapshot).get(); assertThat(response.getSnapshots().get(0).getState(), is(SnapshotsInProgress.State.SUCCESS)); + // The status of the snapshot in the repository can become SUCCESS before it is fully finalized in the cluster state so wait for + // it to disappear from the cluster state as well + SnapshotsInProgress snapshotsInProgress = + client().admin().cluster().state(new ClusterStateRequest()).get().getState().custom(SnapshotsInProgress.TYPE); + assertThat(snapshotsInProgress.entries(), Matchers.empty()); }); } } From bb7e8eb2fd72cb9ed88d6e5932c46e3c526afa75 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 27 May 2019 12:08:45 +0200 Subject: [PATCH 12/40] Introduce ShardState Enum + Slight Cleanup SnapshotsInProgress (#41940) (#42573) * Added separate enum for the state of each shard, 
it was really confusing that we used the same enum for the state of the snapshot overall and the state of each individual shard * relates https://github.com/elastic/elasticsearch/pull/40943#issuecomment-488664150 * Shortened some obvious spots in equals method and saved a few lines via `computeIfAbsent` to make up for adding 50 new lines to this class --- .../TransportSnapshotsStatusAction.java | 1 - .../cluster/SnapshotsInProgress.java | 102 +++++++++++------- .../snapshots/SnapshotShardsService.java | 12 ++- .../snapshots/SnapshotsService.java | 21 ++-- .../cluster/SnapshotsInProgressTests.java | 11 +- .../SharedClusterSnapshotRestoreIT.java | 7 +- ...SnapshotsInProgressSerializationTests.java | 3 +- 7 files changed, 97 insertions(+), 60 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java index c2f0d3dd0c0..8430d1868c8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java @@ -174,7 +174,6 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction implement public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - - SnapshotsInProgress that = (SnapshotsInProgress) o; - - if (!entries.equals(that.entries)) return false; - - return true; + return entries.equals(((SnapshotsInProgress) o).entries); } @Override @@ -208,18 +204,11 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement return snapshot.toString(); } - // package private for testing - ImmutableOpenMap> findWaitingIndices(ImmutableOpenMap shards) { + private ImmutableOpenMap> findWaitingIndices(ImmutableOpenMap shards) { Map> waitingIndicesMap = new HashMap<>(); for (ObjectObjectCursor entry : shards) { - if (entry.value.state() == State.WAITING) { - final String indexName = entry.key.getIndexName(); - List waitingShards = waitingIndicesMap.get(indexName); - if (waitingShards == null) { - waitingShards = new ArrayList<>(); - waitingIndicesMap.put(indexName, waitingShards); - } - waitingShards.add(entry.key); + if (entry.value.state() == ShardState.WAITING) { + waitingIndicesMap.computeIfAbsent(entry.key.getIndexName(), k -> new ArrayList<>()).add(entry.key); } } if (waitingIndicesMap.isEmpty()) { @@ -241,28 +230,27 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement */ public static boolean completed(ObjectContainer shards) { for (ObjectCursor status : shards) { - if (status.value.state().completed() == false) { + if (status.value.state().completed == false) { return false; } } return true; } - public static class ShardSnapshotStatus { - private final State state; + private final ShardState state; private final String nodeId; private final String reason; public ShardSnapshotStatus(String nodeId) { - this(nodeId, State.INIT); + this(nodeId, ShardState.INIT); } - public ShardSnapshotStatus(String nodeId, State state) { + public ShardSnapshotStatus(String nodeId, ShardState state) { this(nodeId, state, null); } - public ShardSnapshotStatus(String nodeId, State state, String reason) { + public ShardSnapshotStatus(String nodeId, ShardState state, String reason) { this.nodeId = nodeId; this.state = state; this.reason = 
reason; @@ -272,11 +260,11 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement public ShardSnapshotStatus(StreamInput in) throws IOException { nodeId = in.readOptionalString(); - state = State.fromValue(in.readByte()); + state = ShardState.fromValue(in.readByte()); reason = in.readOptionalString(); } - public State state() { + public ShardState state() { return state; } @@ -298,14 +286,9 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - ShardSnapshotStatus status = (ShardSnapshotStatus) o; + return Objects.equals(nodeId, status.nodeId) && Objects.equals(reason, status.reason) && state == status.state; - if (nodeId != null ? !nodeId.equals(status.nodeId) : status.nodeId != null) return false; - if (reason != null ? !reason.equals(status.reason) : status.reason != null) return false; - if (state != status.state) return false; - - return true; } @Override @@ -331,11 +314,11 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement MISSING((byte) 5, true, true), WAITING((byte) 6, false, false); - private byte value; + private final byte value; - private boolean completed; + private final boolean completed; - private boolean failed; + private final boolean failed; State(byte value, boolean completed, boolean failed) { this.value = value; @@ -379,7 +362,6 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement private final List entries; - public SnapshotsInProgress(List entries) { this.entries = entries; } @@ -437,7 +419,7 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement builder.put(shardId, new ShardSnapshotStatus(in)); } else { String nodeId = in.readOptionalString(); - State shardState = State.fromValue(in.readByte()); + ShardState shardState = ShardState.fromValue(in.readByte()); // Workaround for https://github.com/elastic/elasticsearch/issues/25878 // Some old snapshot might still have null in shard failure reasons String reason = shardState.failed() ? 
"" : null; @@ -484,7 +466,7 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement shardEntry.value.writeTo(out); } else { out.writeOptionalString(shardEntry.value.nodeId()); - out.writeByte(shardEntry.value.state().value()); + out.writeByte(shardEntry.value.state().value); } } out.writeLong(entry.repositoryStateId); @@ -555,4 +537,52 @@ public class SnapshotsInProgress extends AbstractNamedDiffable implement builder.endArray(); builder.endObject(); } + + public enum ShardState { + INIT((byte) 0, false, false), + SUCCESS((byte) 2, true, false), + FAILED((byte) 3, true, true), + ABORTED((byte) 4, false, true), + MISSING((byte) 5, true, true), + WAITING((byte) 6, false, false); + + private final byte value; + + private final boolean completed; + + private final boolean failed; + + ShardState(byte value, boolean completed, boolean failed) { + this.value = value; + this.completed = completed; + this.failed = failed; + } + + public boolean completed() { + return completed; + } + + public boolean failed() { + return failed; + } + + public static ShardState fromValue(byte value) { + switch (value) { + case 0: + return INIT; + case 2: + return SUCCESS; + case 3: + return FAILED; + case 4: + return ABORTED; + case 5: + return MISSING; + case 6: + return WAITING; + default: + throw new IllegalArgumentException("No shard snapshot state for value [" + value + "]"); + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java index 16eb3bad1b5..347bd714af7 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java @@ -39,6 +39,7 @@ import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; +import org.elasticsearch.cluster.SnapshotsInProgress.ShardState; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -246,7 +247,8 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements // Add all new shards to start processing on final ShardId shardId = shard.key; final ShardSnapshotStatus shardSnapshotStatus = shard.value; - if (localNodeId.equals(shardSnapshotStatus.nodeId()) && shardSnapshotStatus.state() == State.INIT + if (localNodeId.equals(shardSnapshotStatus.nodeId()) + && shardSnapshotStatus.state() == ShardState.INIT && snapshotShards.containsKey(shardId) == false) { logger.trace("[{}] - Adding shard to the queue", shardId); if (startedShards == null) { @@ -284,7 +286,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements } else { // due to CS batching we might have missed the INIT state and straight went into ABORTED // notify master that abort has completed by moving to FAILED - if (shard.value.state() == State.ABORTED) { + if (shard.value.state() == ShardState.ABORTED) { notifyFailedSnapshotShard(snapshot, shard.key, shard.value.reason()); } } @@ -477,12 +479,14 @@ public class SnapshotShardsService extends AbstractLifecycleComponent implements /** Notify the master node that the given shard has been successfully snapshotted **/ private void 
notifySuccessfulSnapshotShard(final Snapshot snapshot, final ShardId shardId) { - sendSnapshotShardUpdate(snapshot, shardId, new ShardSnapshotStatus(clusterService.localNode().getId(), State.SUCCESS)); + sendSnapshotShardUpdate(snapshot, shardId, + new ShardSnapshotStatus(clusterService.localNode().getId(), ShardState.SUCCESS)); } /** Notify the master node that the given shard failed to be snapshotted **/ private void notifyFailedSnapshotShard(final Snapshot snapshot, final ShardId shardId, final String failure) { - sendSnapshotShardUpdate(snapshot, shardId, new ShardSnapshotStatus(clusterService.localNode().getId(), State.FAILED, failure)); + sendSnapshotShardUpdate(snapshot, shardId, + new ShardSnapshotStatus(clusterService.localNode().getId(), ShardState.FAILED, failure)); } /** Updates the shard snapshot status by sending a {@link UpdateIndexShardSnapshotStatusRequest} to the master node */ diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 11bf6f07831..b1d365f7ff1 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -37,6 +37,7 @@ import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; +import org.elasticsearch.cluster.SnapshotsInProgress.ShardState; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; @@ -774,7 +775,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus logger.warn("failing snapshot of shard [{}] on closed node [{}]", shardEntry.key, shardStatus.nodeId()); shards.put(shardEntry.key, - new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "node shutdown")); + new ShardSnapshotStatus(shardStatus.nodeId(), ShardState.FAILED, "node shutdown")); } } } @@ -870,7 +871,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus for (ObjectObjectCursor shardEntry : snapshotShards) { ShardSnapshotStatus shardStatus = shardEntry.value; ShardId shardId = shardEntry.key; - if (shardStatus.state() == State.WAITING) { + if (shardStatus.state() == ShardState.WAITING) { IndexRoutingTable indexShardRoutingTable = routingTable.index(shardId.getIndex()); if (indexShardRoutingTable != null) { IndexShardRoutingTable shardRouting = indexShardRoutingTable.shard(shardId.id()); @@ -891,7 +892,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus // Shard that we were waiting for went into unassigned state or disappeared - giving up snapshotChanged = true; logger.warn("failing snapshot of shard [{}] on unassigned shard [{}]", shardId, shardStatus.nodeId()); - shards.put(shardId, new ShardSnapshotStatus(shardStatus.nodeId(), State.FAILED, "shard is unassigned")); + shards.put(shardId, new ShardSnapshotStatus(shardStatus.nodeId(), ShardState.FAILED, "shard is unassigned")); } else { shards.put(shardId, shardStatus); } @@ -941,7 +942,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus Set missing = new HashSet<>(); Set closed = new HashSet<>(); for (ObjectObjectCursor entry : shards) { - if (entry.value.state() == State.MISSING) { + if 
(entry.value.state() == ShardState.MISSING) { if (metaData.hasIndex(entry.key.getIndex().getName()) && metaData.getIndexSafe(entry.key.getIndex()).getState() == IndexMetaData.State.CLOSE) { closed.add(entry.key.getIndex().getName()); @@ -1192,7 +1193,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus for (ObjectObjectCursor shardEntry : snapshotEntry.shards()) { ShardSnapshotStatus status = shardEntry.value; if (status.state().completed() == false) { - status = new ShardSnapshotStatus(status.nodeId(), State.ABORTED, "aborted by snapshot deletion"); + status = new ShardSnapshotStatus(status.nodeId(), ShardState.ABORTED, "aborted by snapshot deletion"); } shardsBuilder.put(shardEntry.key, status); } @@ -1382,7 +1383,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus if (indexMetaData == null) { // The index was deleted before we managed to start the snapshot - mark it as missing. builder.put(new ShardId(indexName, IndexMetaData.INDEX_UUID_NA_VALUE, 0), - new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "missing index")); + new SnapshotsInProgress.ShardSnapshotStatus(null, ShardState.MISSING, "missing index")); } else { IndexRoutingTable indexRoutingTable = clusterState.getRoutingTable().index(indexName); for (int i = 0; i < indexMetaData.getNumberOfShards(); i++) { @@ -1391,18 +1392,18 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus ShardRouting primary = indexRoutingTable.shard(i).primaryShard(); if (primary == null || !primary.assignedToNode()) { builder.put(shardId, - new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "primary shard is not allocated")); + new SnapshotsInProgress.ShardSnapshotStatus(null, ShardState.MISSING, "primary shard is not allocated")); } else if (primary.relocating() || primary.initializing()) { - builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), State.WAITING)); + builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), ShardState.WAITING)); } else if (!primary.started()) { builder.put(shardId, - new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), State.MISSING, + new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId(), ShardState.MISSING, "primary shard hasn't been started yet")); } else { builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(primary.currentNodeId())); } } else { - builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, + builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(null, ShardState.MISSING, "missing routing table")); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/SnapshotsInProgressTests.java b/server/src/test/java/org/elasticsearch/cluster/SnapshotsInProgressTests.java index fcf70909b31..eac06c786a2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/SnapshotsInProgressTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/SnapshotsInProgressTests.java @@ -21,6 +21,7 @@ package org.elasticsearch.cluster; import org.elasticsearch.cluster.SnapshotsInProgress.Entry; import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; +import org.elasticsearch.cluster.SnapshotsInProgress.ShardState; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.index.shard.ShardId; @@ -55,11 +56,11 @@ public class SnapshotsInProgressTests 
extends ESTestCase { ImmutableOpenMap.Builder shards = ImmutableOpenMap.builder(); // test more than one waiting shard in an index - shards.put(new ShardId(idx1Name, idx1UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), State.WAITING)); - shards.put(new ShardId(idx1Name, idx1UUID, 1), new ShardSnapshotStatus(randomAlphaOfLength(2), State.WAITING)); + shards.put(new ShardId(idx1Name, idx1UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), ShardState.WAITING)); + shards.put(new ShardId(idx1Name, idx1UUID, 1), new ShardSnapshotStatus(randomAlphaOfLength(2), ShardState.WAITING)); shards.put(new ShardId(idx1Name, idx1UUID, 2), new ShardSnapshotStatus(randomAlphaOfLength(2), randomNonWaitingState(), "")); // test exactly one waiting shard in an index - shards.put(new ShardId(idx2Name, idx2UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), State.WAITING)); + shards.put(new ShardId(idx2Name, idx2UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), ShardState.WAITING)); shards.put(new ShardId(idx2Name, idx2UUID, 1), new ShardSnapshotStatus(randomAlphaOfLength(2), randomNonWaitingState(), "")); // test no waiting shards in an index shards.put(new ShardId(idx3Name, idx3UUID, 0), new ShardSnapshotStatus(randomAlphaOfLength(2), randomNonWaitingState(), "")); @@ -72,7 +73,7 @@ public class SnapshotsInProgressTests extends ESTestCase { assertFalse(waitingIndices.containsKey(idx3Name)); } - private State randomNonWaitingState() { - return randomFrom(Arrays.stream(State.values()).filter(s -> s != State.WAITING).collect(Collectors.toSet())); + private ShardState randomNonWaitingState() { + return randomFrom(Arrays.stream(ShardState.values()).filter(s -> s != ShardState.WAITING).collect(Collectors.toSet())); } } diff --git a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java index 001a83710dc..8e2fff4e275 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java @@ -54,6 +54,7 @@ import org.elasticsearch.cluster.RestoreInProgress; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.Entry; import org.elasticsearch.cluster.SnapshotsInProgress.ShardSnapshotStatus; +import org.elasticsearch.cluster.SnapshotsInProgress.ShardState; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -2702,9 +2703,9 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas public ClusterState execute(ClusterState currentState) { // Simulate orphan snapshot ImmutableOpenMap.Builder shards = ImmutableOpenMap.builder(); - shards.put(new ShardId(idxName, "_na_", 0), new ShardSnapshotStatus("unknown-node", State.ABORTED, "aborted")); - shards.put(new ShardId(idxName, "_na_", 1), new ShardSnapshotStatus("unknown-node", State.ABORTED, "aborted")); - shards.put(new ShardId(idxName, "_na_", 2), new ShardSnapshotStatus("unknown-node", State.ABORTED, "aborted")); + shards.put(new ShardId(idxName, "_na_", 0), new ShardSnapshotStatus("unknown-node", ShardState.ABORTED, "aborted")); + shards.put(new ShardId(idxName, "_na_", 1), new ShardSnapshotStatus("unknown-node", ShardState.ABORTED, "aborted")); + shards.put(new ShardId(idxName, "_na_", 2), new 
ShardSnapshotStatus("unknown-node", ShardState.ABORTED, "aborted")); List entries = new ArrayList<>(); entries.add(new Entry(new Snapshot(repositoryName, createSnapshotResponse.getSnapshotInfo().snapshotId()), diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java index 3f23c8f0a2d..6c8ddfb56c1 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.cluster.ClusterState.Custom; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress.Entry; +import org.elasticsearch.cluster.SnapshotsInProgress.ShardState; import org.elasticsearch.cluster.SnapshotsInProgress.State; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -65,7 +66,7 @@ public class SnapshotsInProgressSerializationTests extends AbstractDiffableWireS for (int j = 0; j < shardsCount; j++) { ShardId shardId = new ShardId(new Index(randomAlphaOfLength(10), randomAlphaOfLength(10)), randomIntBetween(0, 10)); String nodeId = randomAlphaOfLength(10); - State shardState = randomFrom(State.values()); + ShardState shardState = randomFrom(ShardState.values()); builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(nodeId, shardState, shardState.failed() ? randomAlphaOfLength(10) : null)); } From c4f44024af580840c060783f8c6bb7f1657484fd Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 27 May 2019 12:24:20 +0200 Subject: [PATCH 13/40] Remove Delete Method from BlobStore (#41619) (#42574) * Remove Delete Method from BlobStore (#41619) * The delete method on the blob store was used almost nowhere and just duplicates the delete method on the blob containers * The fact that it provided for some recursive delete logic (that did not behave the same way on all implementations) was not used and not properly tested either --- .../common/blobstore/url/URLBlobStore.java | 17 ------- .../repositories/azure/AzureBlobStore.java | 15 ------ .../gcs/GoogleCloudStorageBlobStore.java | 16 +----- .../repositories/hdfs/HdfsBlobStore.java | 8 --- .../repositories/s3/S3BlobStore.java | 49 ------------------- .../common/blobstore/BlobStore.java | 7 --- .../common/blobstore/fs/FsBlobStore.java | 11 ----- .../blobstore/BlobStoreRepository.java | 5 +- .../snapshots/mockstore/BlobStoreWrapper.java | 5 -- .../repositories/ESBlobStoreTestCase.java | 3 -- 10 files changed, 5 insertions(+), 131 deletions(-) diff --git a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java index a7042b8bfee..8f5ce9b0ffe 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java @@ -57,9 +57,6 @@ public class URLBlobStore implements BlobStore { new ByteSizeValue(100, ByteSizeUnit.KB)).getBytes(); } - /** - * {@inheritDoc} - */ @Override public String toString() { return path.toString(); @@ -83,9 +80,6 @@ public class URLBlobStore implements BlobStore { return this.bufferSizeInBytes; } - /** - * {@inheritDoc} 
- */ @Override public BlobContainer blobContainer(BlobPath path) { try { @@ -95,17 +89,6 @@ public class URLBlobStore implements BlobStore { } } - /** - * This operation is not supported by URL Blob Store - */ - @Override - public void delete(BlobPath path) { - throw new UnsupportedOperationException("URL repository is read only"); - } - - /** - * {@inheritDoc} - */ @Override public void close() { // nothing to do here... diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 654836ea0fb..697125fbd53 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -22,8 +22,6 @@ package org.elasticsearch.repositories.azure; import com.microsoft.azure.storage.LocationMode; import com.microsoft.azure.storage.StorageException; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; @@ -40,8 +38,6 @@ import java.util.Map; import static java.util.Collections.emptyMap; public class AzureBlobStore implements BlobStore { - - private static final Logger logger = LogManager.getLogger(AzureBlobStore.class); private final AzureStorageService service; @@ -82,17 +78,6 @@ public class AzureBlobStore implements BlobStore { return new AzureBlobContainer(path, this); } - @Override - public void delete(BlobPath path) throws IOException { - final String keyPath = path.buildAsString(); - try { - service.deleteFiles(clientName, container, keyPath); - } catch (URISyntaxException | StorageException e) { - logger.warn("cannot access [{}] in container {{}}: {}", keyPath, container, e.getMessage()); - throw new IOException(e); - } - } - @Override public void close() { } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 88489c4fcb1..011adfe5403 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -50,11 +50,11 @@ import java.nio.channels.ReadableByteChannel; import java.nio.channels.WritableByteChannel; import java.nio.file.FileAlreadyExistsException; import java.nio.file.NoSuchFileException; +import java.util.Map; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.Map; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; @@ -91,11 +91,6 @@ class GoogleCloudStorageBlobStore implements BlobStore { return new GoogleCloudStorageBlobContainer(path, this); } - @Override - public void delete(BlobPath path) throws IOException { - deleteBlobsByPrefix(path.buildAsString()); - } - @Override public void close() { } @@ -291,15 +286,6 @@ class GoogleCloudStorageBlobStore implements BlobStore { } } - /** - * Deletes multiple blobs from the specific bucket all of which have prefixed names - * - * @param prefix prefix of the blobs to delete - */ - private void 
deleteBlobsByPrefix(String prefix) throws IOException { - deleteBlobsIgnoringIfNotExists(listBlobsByPrefix("", prefix).keySet()); - } - /** * Deletes multiple blobs from the specific bucket using a batch request * diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java index fde7657fe31..ad0e6630585 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java @@ -66,14 +66,6 @@ final class HdfsBlobStore implements BlobStore { }); } - @Override - public void delete(BlobPath path) throws IOException { - execute((Operation) fc -> { - fc.delete(translateToHdfsPath(path), true); - return null; - }); - } - @Override public String toString() { return root.toUri().toString(); diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index d4df4094fcf..fcded005535 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -20,10 +20,6 @@ package org.elasticsearch.repositories.s3; import com.amazonaws.services.s3.model.CannedAccessControlList; -import com.amazonaws.services.s3.model.DeleteObjectsRequest; -import com.amazonaws.services.s3.model.DeleteObjectsRequest.KeyVersion; -import com.amazonaws.services.s3.model.ObjectListing; -import com.amazonaws.services.s3.model.S3ObjectSummary; import com.amazonaws.services.s3.model.StorageClass; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobContainer; @@ -33,7 +29,6 @@ import org.elasticsearch.common.blobstore.BlobStoreException; import org.elasticsearch.common.unit.ByteSizeValue; import java.io.IOException; -import java.util.ArrayList; import java.util.Locale; class S3BlobStore implements BlobStore { @@ -90,50 +85,6 @@ class S3BlobStore implements BlobStore { return new S3BlobContainer(path, this); } - @Override - public void delete(BlobPath path) { - try (AmazonS3Reference clientReference = clientReference()) { - ObjectListing prevListing = null; - // From - // http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html - // we can do at most 1K objects per delete - // We don't know the bucket name until first object listing - DeleteObjectsRequest multiObjectDeleteRequest = null; - final ArrayList keys = new ArrayList<>(); - while (true) { - ObjectListing list; - if (prevListing != null) { - final ObjectListing finalPrevListing = prevListing; - list = SocketAccess.doPrivileged(() -> clientReference.client().listNextBatchOfObjects(finalPrevListing)); - } else { - list = SocketAccess.doPrivileged(() -> clientReference.client().listObjects(bucket, path.buildAsString())); - multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); - } - for (final S3ObjectSummary summary : list.getObjectSummaries()) { - keys.add(new KeyVersion(summary.getKey())); - // Every 500 objects batch the delete request - if (keys.size() > 500) { - multiObjectDeleteRequest.setKeys(keys); - final DeleteObjectsRequest finalMultiObjectDeleteRequest = multiObjectDeleteRequest; - SocketAccess.doPrivilegedVoid(() -> 
clientReference.client().deleteObjects(finalMultiObjectDeleteRequest)); - multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName()); - keys.clear(); - } - } - if (list.isTruncated()) { - prevListing = list; - } else { - break; - } - } - if (!keys.isEmpty()) { - multiObjectDeleteRequest.setKeys(keys); - final DeleteObjectsRequest finalMultiObjectDeleteRequest = multiObjectDeleteRequest; - SocketAccess.doPrivilegedVoid(() -> clientReference.client().deleteObjects(finalMultiObjectDeleteRequest)); - } - } - } - @Override public void close() throws IOException { this.service.close(); diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java b/server/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java index e4cdb148a15..6ed6722995c 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.blobstore; import java.io.Closeable; -import java.io.IOException; /** * An interface for storing blobs. @@ -30,10 +29,4 @@ public interface BlobStore extends Closeable { * Get a blob container instance for storing blobs at the given {@link BlobPath}. */ BlobContainer blobContainer(BlobPath path); - - /** - * Delete the blob store at the given {@link BlobPath}. - */ - void delete(BlobPath path) throws IOException; - } diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java index 8a4d51e4dc9..60c39a48e09 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java @@ -19,7 +19,6 @@ package org.elasticsearch.common.blobstore.fs; -import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; @@ -72,16 +71,6 @@ public class FsBlobStore implements BlobStore { } } - @Override - public void delete(BlobPath path) throws IOException { - assert readOnly == false : "should not delete anything from a readonly repository: " + path; - //noinspection ConstantConditions in case assertions are disabled - if (readOnly) { - throw new ElasticsearchException("unexpectedly deleting [" + path + "] from a readonly repository"); - } - IOUtils.rm(buildPath(path)); - } - @Override public void close() { // nothing to do here... 
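With the store-level delete gone, the remaining callers express cleanup through BlobContainer primitives, which behave the same way on every implementation. A minimal, self-contained sketch of that pattern (assuming only the BlobContainer methods this patch itself uses; deleteAllBlobs is a hypothetical helper, not part of the change):

    import java.io.IOException;
    import java.util.ArrayList;
    import org.elasticsearch.common.blobstore.BlobContainer;
    import org.elasticsearch.common.blobstore.BlobPath;
    import org.elasticsearch.common.blobstore.BlobStore;

    // Hypothetical helper: bulk-delete every blob directly under the given path,
    // ignoring blobs that have already disappeared, so all stores behave identically.
    static void deleteAllBlobs(BlobStore store, BlobPath path) throws IOException {
        final BlobContainer container = store.blobContainer(path);
        container.deleteBlobsIgnoringIfNotExists(new ArrayList<>(container.listBlobs().keySet()));
    }

The endVerification change in the next hunk is exactly this shape: list the test blobs, bulk-delete them, then drop the now-empty prefix from the parent container.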
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index b71b472a253..0d4c1623d2d 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -629,7 +629,10 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp public void endVerification(String seed) { if (isReadOnly() == false) { try { - blobStore().delete(basePath().add(testBlobPrefix(seed))); + final String testPrefix = testBlobPrefix(seed); + final BlobContainer container = blobStore().blobContainer(basePath().add(testPrefix)); + container.deleteBlobsIgnoringIfNotExists(new ArrayList<>(container.listBlobs().keySet())); + blobStore().blobContainer(basePath()).deleteBlobIgnoringIfNotExists(testPrefix); } catch (IOException exp) { throw new RepositoryVerificationException(metadata.name(), "cannot delete test data at " + basePath(), exp); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java index 08e0c6fdcfa..cdb2ef3ce2d 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java @@ -37,11 +37,6 @@ public class BlobStoreWrapper implements BlobStore { return delegate.blobContainer(path); } - @Override - public void delete(BlobPath path) throws IOException { - delegate.delete(path); - } - @Override public void close() throws IOException { delegate.close(); diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java index ccc38ae3629..a32d8419273 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreTestCase.java @@ -49,9 +49,6 @@ public abstract class ESBlobStoreTestCase extends ESTestCase { assertTrue(containerFoo.blobExists("test")); assertTrue(containerBar.blobExists("test")); - store.delete(new BlobPath()); - assertFalse(containerFoo.blobExists("test")); - assertFalse(containerBar.blobExists("test")); } } From a94d24ae5ad2f7a1695cf83c2ad191c456c7dab4 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Mon, 27 May 2019 13:57:17 +0200 Subject: [PATCH 14/40] Fix RareClusterStateIT (#42430) (#42580) * It looks like, with a very low likelihood, we might be cancelling a previous publication instead of the one triggered by the given request * Fixed by adding a wait for no in-progress publications before executing the request (see the sketch below) * Also added debug logging that would've identified this problem * Closes #36813
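Restated outside the flattened diff, the fix gives the test helper this ordering (a sketch using the test's own names; the generic type parameters, elided in the diff below, are assumed):

    // Wait until no publication is in flight, so the cancel below is guaranteed
    // to hit the publication triggered by this request rather than an earlier one.
    assertBusy(() -> assertFalse(
        ((Coordinator) internalCluster().getCurrentMasterNodeInstance(Discovery.class)).publicationInProgress()));
    // Only now trigger the request and cancel its committed publication.
    final ActionFuture<Response> future = req.execute();
    assertBusy(() -> assertTrue(
        ((Coordinator) internalCluster().getCurrentMasterNodeInstance(Discovery.class)).cancelCommittedPublication()));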
--- .../cluster/coordination/Coordinator.java | 10 +++++++--- .../cluster/coordination/RareClusterStateIT.java | 4 ++++ 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index e6acac17da9..af9a38bec49 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -1198,9 +1198,13 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery */ boolean cancelCommittedPublication() { synchronized (mutex) { - if (currentPublication.isPresent() && currentPublication.get().isCommitted()) { - currentPublication.get().cancel("cancelCommittedPublication"); - return true; + if (currentPublication.isPresent()) { + final CoordinatorPublication publication = currentPublication.get(); + if (publication.isCommitted()) { + publication.cancel("cancelCommittedPublication"); + logger.debug("Cancelled publication of [{}].", publication); + return true; + } } return false; } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java index 4c0168c6e3a..614bede3288 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/RareClusterStateIT.java @@ -141,6 +141,10 @@ public class RareClusterStateIT extends ESIntegTestCase { private ActionFuture executeAndCancelCommittedPublication( ActionRequestBuilder req) throws Exception { + // Wait for no publication in progress to not accidentally cancel a publication different from the one triggered by the given + // request. + assertBusy( + () -> assertFalse(((Coordinator) internalCluster().getCurrentMasterNodeInstance(Discovery.class)).publicationInProgress())); ActionFuture future = req.execute(); assertBusy( () -> assertTrue(((Coordinator)internalCluster().getCurrentMasterNodeInstance(Discovery.class)).cancelCommittedPublication())); From de6be819d66fb4ee081a9cad2de25231fdf05340 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 27 May 2019 17:30:21 -0400 Subject: [PATCH 15/40] Allocate to data-only nodes in ReopenWhileClosingIT (#42560) If all primary shards are allocated on the master node, then the verify-before-close step will never interact with the mock transport service. This change instead prefers to allocate shards on data-only nodes (see the sketch below).
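The mechanism is ordinary allocation filtering: start dedicated data-only nodes and pin the test index to them by name, so a primary can never land on the master. A minimal sketch of the idiom, using the same setting the diff below applies (indexName is illustrative):

    // Restrict shard allocation for this index to the named data-only nodes.
    final List<String> dataOnlyNodes = internalCluster().startDataOnlyNodes(randomIntBetween(2, 3));
    createIndex(indexName, Settings.builder()
        .put(indexSettings())
        .put("index.routing.allocation.include._name", String.join(",", dataOnlyNodes))
        .build());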
Closes #39757 --- .../indices/state/ReopenWhileClosingIT.java | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java b/server/src/test/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java index 25d8f07bbd1..8cf3b76184a 100644 --- a/server/src/test/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java +++ b/server/src/test/java/org/elasticsearch/indices/state/ReopenWhileClosingIT.java @@ -28,6 +28,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Glob; import org.elasticsearch.common.Strings; import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.RunOnce; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; @@ -50,7 +51,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, minNumDataNodes = 2) +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class ReopenWhileClosingIT extends ESIntegTestCase { @Override @@ -64,8 +65,9 @@ public class ReopenWhileClosingIT extends ESIntegTestCase { } public void testReopenDuringClose() throws Exception { + List dataOnlyNodes = internalCluster().startDataOnlyNodes(randomIntBetween(2, 3)); final String indexName = "test"; - createIndexWithDocs(indexName); + createIndexWithDocs(indexName, dataOnlyNodes); ensureYellowAndNoInitializingShards(indexName); @@ -85,10 +87,11 @@ public class ReopenWhileClosingIT extends ESIntegTestCase { } public void testReopenDuringCloseOnMultipleIndices() throws Exception { + List dataOnlyNodes = internalCluster().startDataOnlyNodes(randomIntBetween(2, 3)); final List indices = new ArrayList<>(); for (int i = 0; i < randomIntBetween(2, 10); i++) { indices.add("index-" + i); - createIndexWithDocs(indices.get(i)); + createIndexWithDocs(indices.get(i), dataOnlyNodes); } ensureYellowAndNoInitializingShards(indices.toArray(Strings.EMPTY_ARRAY)); @@ -116,8 +119,9 @@ public class ReopenWhileClosingIT extends ESIntegTestCase { }); } - private void createIndexWithDocs(final String indexName) { - createIndex(indexName); + private void createIndexWithDocs(final String indexName, final Collection dataOnlyNodes) { + createIndex(indexName, + Settings.builder().put(indexSettings()).put("index.routing.allocation.include._name", String.join(",", dataOnlyNodes)).build()); final int nbDocs = scaledRandomIntBetween(1, 100); for (int i = 0; i < nbDocs; i++) { index(indexName, "_doc", String.valueOf(i), "num", i); From 4123ade2b6641cec674eb508ae86baabf0acea9a Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 27 May 2019 15:57:59 -0400 Subject: [PATCH 16/40] Add test to ensure we can execute update requests in a mixed cluster Relates #42596 --- .../elasticsearch/upgrades/RecoveryIT.java | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java index 4fb5341e1bf..863172e91e7 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/RecoveryIT.java @@ -33,6 +33,7 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.IndexSettings; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.document.RestIndexAction; +import org.elasticsearch.rest.action.document.RestUpdateAction; import org.elasticsearch.test.rest.yaml.ObjectPath; import org.hamcrest.Matcher; @@ -572,4 +573,23 @@ public class RecoveryIT extends AbstractRollingTestCase { }); }, 60, TimeUnit.SECONDS); } + + /** Ensure that we can always execute update requests regardless of the version of the cluster */ + public void testUpdateDoc() throws Exception { + final String index = "test_update_doc"; + if (CLUSTER_TYPE == ClusterType.OLD) { + Settings.Builder settings = Settings.builder() + .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) + .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2); + createIndex(index, settings.build()); + } + ensureGreen(index); + indexDocs(index, 0, 10); + for (int i = 0; i < 10; i++) { + Request update = new Request("POST", index + "/test/" + i + "/_update/"); + update.setOptions(expectWarnings(RestUpdateAction.TYPES_DEPRECATION_MESSAGE)); + update.setJsonEntity("{\"doc\": {\"f\": " + randomNonNegativeLong() + "}}"); + client().performRequest(update); + } + } } From ab832c4f17d198627354c80f4581f4bb53be95a0 Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 27 May 2019 20:38:58 -0400 Subject: [PATCH 17/40] Use doc instead of _doc in FullClusterRestartIT ES does not accept doc types starting with an underscore until 6.2.0. We have to use "doc" instead of "_doc" in FullClusterRestartIT if we are upgrading from a pre-6.2.0 cluster. Closes #42581 --- .../java/org/elasticsearch/upgrades/FullClusterRestartIT.java | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 7ea5e0cdb35..88389d97c89 100644 --- a/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -1031,7 +1031,6 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { * This test creates an index in the old cluster and then closes it. When the cluster is fully restarted in a newer version, * it verifies that the index exists and is replicated if the old version supports replication. */ - @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/42581") public void testClosedIndices() throws Exception { if (isRunningAgainstOldCluster()) { createIndex(index, Settings.builder() @@ -1043,7 +1042,7 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase { if (randomBoolean()) { numDocs = between(1, 100); for (int i = 0; i < numDocs; i++) { - final Request request = new Request("POST", "/" + index + "/_doc/" + i); + final Request request = new Request("POST", "/" + index + "/" + type + "/" + i); request.setJsonEntity(Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject())); assertOK(client().performRequest(request)); if (rarely()) { From 2077f9ffbc8a4c7fd64884cd75147a7e546bd34e Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Mon, 27 May 2019 21:44:36 -0400 Subject: [PATCH 18/40] Reset mock transport service in CcrRetentionLeaseIT (#42600) testRetentionLeaseIsAddedIfItDisappearsWhileFollowing does not reset the mock transport service after the test. Surviving transport interceptors from that test can sneakily remove retention leases and make other tests fail; the cleanup pattern the fix applies everywhere is sketched below.
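A minimal sketch of that cleanup pattern, using the helpers the test already has (the interception and scenario bodies are elided):

    final MockTransportService senderTransportService =
        (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName());
    try {
        senderTransportService.addSendBehavior((connection, requestId, action, request, options) -> {
            // ... test-specific interception of selected actions ...
            connection.sendRequest(requestId, action, request, options);
        });
        // ... exercise the behavior under test ...
    } finally {
        // Guaranteed cleanup: interceptors can no longer leak into later tests.
        senderTransportService.clearAllRules();
    }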
Closes #39331 Closes #39509 Closes #41428 Closes #41679 Closes #41737 Closes #41756 --- .../xpack/ccr/CcrRetentionLeaseIT.java | 89 ++++++++++--------- 1 file changed, 47 insertions(+), 42 deletions(-) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java index eb4f4be8423..9b3eaa7de55 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CcrRetentionLeaseIT.java @@ -6,7 +6,6 @@ package org.elasticsearch.xpack.ccr; -import com.carrotsearch.hppc.cursors.ObjectCursor; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; @@ -44,7 +43,6 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.snapshots.RestoreInfo; import org.elasticsearch.snapshots.RestoreService; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.RemoteTransportException; @@ -88,7 +86,6 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; -@TestLogging(value = "org.elasticsearch.xpack.ccr:trace,org.elasticsearch.indices.recovery:trace,org.elasticsearch.index.seqno:debug") public class CcrRetentionLeaseIT extends CcrIntegTestCase { public static final class RetentionLeaseRenewIntervalSettingPlugin extends Plugin { @@ -224,9 +221,9 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase { // block the recovery from completing; this ensures the background sync is still running final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.addSendBehavior( (connection, requestId, action, request, options) -> { if (ClearCcrRestoreSessionAction.NAME.equals(action) @@ -248,9 +245,9 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase { assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); latch.countDown(); } finally { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.clearAllRules(); } } @@ -405,9 +402,9 @@ public class CcrRetentionLeaseIT
extends CcrIntegTestCase { final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); try { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.addSendBehavior( (connection, requestId, action, request, options) -> { if (RetentionLeaseActions.Remove.ACTION_NAME.equals(action) @@ -456,9 +453,9 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase { assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); } } finally { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.clearAllRules(); } } @@ -488,9 +485,9 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase { final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); try { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.addSendBehavior( (connection, requestId, action, request, options) -> { if (RetentionLeaseActions.Remove.ACTION_NAME.equals(action) @@ -526,9 +523,9 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase { getLeaderCluster().getClusterName(), new Index(leaderIndex, leaderUUID)))); } finally { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.clearAllRules(); } } @@ -766,35 +763,36 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase { final CountDownLatch latch = new CountDownLatch(1); final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getNodes().values()) { - final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); - senderTransportService.addSendBehavior( + try { 
+ for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); + senderTransportService.addSendBehavior( (connection, requestId, action, request, options) -> { if (RetentionLeaseActions.Renew.ACTION_NAME.equals(action) - || TransportActionProxy.getProxyAction(RetentionLeaseActions.Renew.ACTION_NAME).equals(action)) { + || TransportActionProxy.getProxyAction(RetentionLeaseActions.Renew.ACTION_NAME).equals(action)) { senderTransportService.clearAllRules(); final RetentionLeaseActions.RenewRequest renewRequest = (RetentionLeaseActions.RenewRequest) request; final String primaryShardNodeId = - getLeaderCluster() - .clusterService() - .state() - .routingTable() - .index(leaderIndex) - .shard(renewRequest.getShardId().id()) - .primaryShard() - .currentNodeId(); + getLeaderCluster() + .clusterService() + .state() + .routingTable() + .index(leaderIndex) + .shard(renewRequest.getShardId().id()) + .primaryShard() + .currentNodeId(); final String primaryShardNodeName = - getLeaderCluster().clusterService().state().nodes().get(primaryShardNodeId).getName(); + getLeaderCluster().clusterService().state().nodes().get(primaryShardNodeId).getName(); final IndexShard primary = - getLeaderCluster() - .getInstance(IndicesService.class, primaryShardNodeName) - .getShardOrNull(renewRequest.getShardId()); + getLeaderCluster() + .getInstance(IndicesService.class, primaryShardNodeName) + .getShardOrNull(renewRequest.getShardId()); final CountDownLatch innerLatch = new CountDownLatch(1); // this forces the background renewal from following to face a retention lease not found exception primary.removeRetentionLease( - getRetentionLeaseId(followerIndex, leaderIndex), - ActionListener.wrap(r -> innerLatch.countDown(), e -> fail(e.toString()))); + getRetentionLeaseId(followerIndex, leaderIndex), + ActionListener.wrap(r -> innerLatch.countDown(), e -> fail(e.toString()))); try { innerLatch.await(); @@ -807,11 +805,18 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase { } connection.sendRequest(requestId, action, request, options); }); + } + + latch.await(); + + assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); + } finally { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { + final MockTransportService senderTransportService = + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); + senderTransportService.clearAllRules(); + } } - - latch.await(); - - assertRetentionLeaseRenewal(numberOfShards, numberOfReplicas, followerIndex, leaderIndex); } /** @@ -858,9 +863,9 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase { final ClusterStateResponse followerClusterState = followerClient().admin().cluster().prepareState().clear().setNodes(true).get(); try { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.addSendBehavior( (connection, requestId, action, request, options) -> { if 
(RetentionLeaseActions.Renew.ACTION_NAME.equals(action) @@ -914,9 +919,9 @@ public class CcrRetentionLeaseIT extends CcrIntegTestCase { assertThat(Strings.toString(shardStats), shardStats.getRetentionLeaseStats().retentionLeases().leases(), empty()); } } finally { - for (final ObjectCursor senderNode : followerClusterState.getState().nodes().getDataNodes().values()) { + for (final DiscoveryNode senderNode : followerClusterState.getState().nodes()) { final MockTransportService senderTransportService = - (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.value.getName()); + (MockTransportService) getFollowerCluster().getInstance(TransportService.class, senderNode.getName()); senderTransportService.clearAllRules(); } } From c079fb61bfdfdc1ac2c8a61b9c61639ad1e4aa6e Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 28 May 2019 08:00:02 +0200 Subject: [PATCH 19/40] Remove Dead Code from Azure Repo Plugin (#42178) (#42569) * None of this stuff is used --- .../repositories/azure/AzureBlobStore.java | 7 +----- .../repositories/azure/AzureRepository.java | 11 ++------- .../azure/AzureStorageService.java | 24 +++---------------- .../azure/AzureStorageSettings.java | 12 ++-------- .../repositories/azure/SocketAccess.java | 2 +- .../azure/AzureBlobStoreContainerTests.java | 15 ++++-------- .../azure/AzureBlobStoreTests.java | 16 ++++--------- .../azure/AzureStorageServiceMock.java | 14 ++--------- 8 files changed, 19 insertions(+), 82 deletions(-) diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 697125fbd53..7eeadc7f647 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -45,8 +45,7 @@ public class AzureBlobStore implements BlobStore { private final String container; private final LocationMode locationMode; - public AzureBlobStore(RepositoryMetaData metadata, AzureStorageService service) - throws URISyntaxException, StorageException { + public AzureBlobStore(RepositoryMetaData metadata, AzureStorageService service) { this.container = Repository.CONTAINER_SETTING.get(metadata.settings()); this.clientName = Repository.CLIENT_NAME.get(metadata.settings()); this.service = service; @@ -69,10 +68,6 @@ public class AzureBlobStore implements BlobStore { return locationMode; } - public String getClientName() { - return clientName; - } - @Override public BlobContainer blobContainer(BlobPath path) { return new AzureBlobContainer(path, this); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java index 5345fb13f6d..70ab72a232c 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java @@ -115,20 +115,16 @@ public class AzureRepository extends BlobStoreRepository { } } - // only use for testing @Override protected BlobStore getBlobStore() { return super.getBlobStore(); } - /** - * {@inheritDoc} - */ @Override - protected AzureBlobStore createBlobStore() throws URISyntaxException, StorageException { + protected AzureBlobStore createBlobStore() { final 
AzureBlobStore blobStore = new AzureBlobStore(metadata, storageService); - logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage( + logger.debug(() -> new ParameterizedMessage( "using container [{}], chunk_size [{}], compress [{}], base_path [{}]", blobStore, chunkSize, isCompress(), basePath)); return blobStore; @@ -139,9 +135,6 @@ public class AzureRepository extends BlobStoreRepository { return basePath; } - /** - * {@inheritDoc} - */ @Override protected ByteSizeValue chunkSize() { return chunkSize; diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java index 79e8d4c6235..5e4fa772f77 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java @@ -98,7 +98,7 @@ public class AzureStorageService { } } - protected CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { + private static CloudBlobClient buildClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { final CloudBlobClient client = createClient(azureStorageSettings); // Set timeout option if the user sets cloud.azure.storage.timeout or // cloud.azure.storage.xxx.timeout (it's negative by default) @@ -116,12 +116,12 @@ public class AzureStorageService { return client; } - protected CloudBlobClient createClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { + private static CloudBlobClient createClient(AzureStorageSettings azureStorageSettings) throws InvalidKeyException, URISyntaxException { final String connectionString = azureStorageSettings.buildConnectionString(); return CloudStorageAccount.parse(connectionString).createCloudBlobClient(); } - protected OperationContext buildOperationContext(AzureStorageSettings azureStorageSettings) { + private static OperationContext buildOperationContext(AzureStorageSettings azureStorageSettings) { final OperationContext context = new OperationContext(); context.setProxy(azureStorageSettings.getProxy()); return context; @@ -147,24 +147,6 @@ public class AzureStorageService { return SocketAccess.doPrivilegedException(() -> blobContainer.exists(null, null, client.v2().get())); } - public void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException { - final Tuple> client = client(account); - // container name must be lower case. 
- logger.trace(() -> new ParameterizedMessage("delete files container [{}], path [{}]", container, path)); - SocketAccess.doPrivilegedVoidException(() -> { - // list the blobs using a flat blob listing mode - final CloudBlobContainer blobContainer = client.v1().getContainerReference(container); - for (final ListBlobItem blobItem : blobContainer.listBlobs(path, true, EnumSet.noneOf(BlobListingDetails.class), null, - client.v2().get())) { - final String blobName = blobNameFromUri(blobItem.getUri()); - logger.trace(() -> new ParameterizedMessage("removing blob [{}] full URI was [{}]", blobName, blobItem.getUri())); - // don't call {@code #deleteBlob}, use the same client - final CloudBlockBlob azureBlob = blobContainer.getBlockBlobReference(blobName); - azureBlob.delete(DeleteSnapshotsOption.NONE, null, null, client.v2().get()); - } - }); - } - /** * Extract the blob name from a URI like https://myservice.azure.net/container/path/to/myfile * It should remove the container part (first part of the path) and gives path/to/myfile diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java index 1c90f97a437..508d1ef3178 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageSettings.java @@ -129,14 +129,6 @@ final class AzureStorageSettings { this.locationMode = LocationMode.PRIMARY_ONLY; } - public String getKey() { - return key; - } - - public String getAccount() { - return account; - } - public String getEndpointSuffix() { return endpointSuffix; } @@ -207,7 +199,7 @@ final class AzureStorageSettings { // pkg private for tests /** Parse settings for a single client. 
*/ - static AzureStorageSettings getClientSettings(Settings settings, String clientName) { + private static AzureStorageSettings getClientSettings(Settings settings, String clientName) { try (SecureString account = getConfigValue(settings, clientName, ACCOUNT_SETTING); SecureString key = getConfigValue(settings, clientName, KEY_SETTING)) { return new AzureStorageSettings(account.toString(), key.toString(), @@ -226,7 +218,7 @@ final class AzureStorageSettings { return concreteSetting.get(settings); } - public static T getValue(Settings settings, String groupName, Setting setting) { + private static T getValue(Settings settings, String groupName, Setting setting) { final Setting.AffixKey k = (Setting.AffixKey) setting.getRawKey(); final String fullKey = k.toConcreteKey(groupName).toString(); return setting.getConcreteSetting(fullKey).get(settings); diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java index da8b8543006..1400cc5b066 100644 --- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java +++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/SocketAccess.java @@ -48,7 +48,7 @@ public final class SocketAccess { } } - public static T doPrivilegedException(PrivilegedExceptionAction operation) throws StorageException, URISyntaxException { + public static T doPrivilegedException(PrivilegedExceptionAction operation) throws StorageException { SpecialPermission.check(); try { return AccessController.doPrivileged(operation); diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java index a06dd7c3f28..13cc487a1c1 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreContainerTests.java @@ -19,24 +19,17 @@ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; -import java.io.IOException; -import java.net.URISyntaxException; public class AzureBlobStoreContainerTests extends ESBlobStoreContainerTestCase { @Override - protected BlobStore newBlobStore() throws IOException { - try { - RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); - AzureStorageServiceMock client = new AzureStorageServiceMock(); - return new AzureBlobStore(repositoryMetaData, client); - } catch (URISyntaxException | StorageException e) { - throw new IOException(e); - } + protected BlobStore newBlobStore() { + RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); + AzureStorageServiceMock client = new AzureStorageServiceMock(); + return new AzureBlobStore(repositoryMetaData, client); } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java index 
9a0c9039d08..67d30fda05b 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobStoreTests.java @@ -18,25 +18,17 @@ */ package org.elasticsearch.repositories.azure; -import com.microsoft.azure.storage.StorageException; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.repositories.ESBlobStoreTestCase; -import java.io.IOException; -import java.net.URISyntaxException; - public class AzureBlobStoreTests extends ESBlobStoreTestCase { @Override - protected BlobStore newBlobStore() throws IOException { - try { - RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); - AzureStorageServiceMock client = new AzureStorageServiceMock(); - return new AzureBlobStore(repositoryMetaData, client); - } catch (URISyntaxException | StorageException e) { - throw new IOException(e); - } + protected BlobStore newBlobStore() { + RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY); + AzureStorageServiceMock client = new AzureStorageServiceMock(); + return new AzureBlobStore(repositoryMetaData, client); } } diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java index 18eb529c0ee..3df197bc98e 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageServiceMock.java @@ -34,7 +34,6 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.net.SocketPermission; -import java.net.URISyntaxException; import java.nio.file.FileAlreadyExistsException; import java.nio.file.NoSuchFileException; import java.security.AccessController; @@ -61,21 +60,13 @@ public class AzureStorageServiceMock extends AzureStorageService { return true; } - @Override - public void deleteFiles(String account, String container, String path) throws URISyntaxException, StorageException { - final Map blobs = listBlobsByPrefix(account, container, path, null); - for (String key : blobs.keySet()) { - deleteBlob(account, container, key); - } - } - @Override public boolean blobExists(String account, String container, String blob) { return blobs.containsKey(blob); } @Override - public void deleteBlob(String account, String container, String blob) throws URISyntaxException, StorageException { + public void deleteBlob(String account, String container, String blob) throws StorageException { if (blobs.remove(blob) == null) { throw new StorageException("BlobNotFound", "[" + blob + "] does not exist.", 404, null, null); } @@ -109,8 +100,7 @@ public class AzureStorageServiceMock extends AzureStorageService { @Override public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize, - boolean failIfAlreadyExists) - throws URISyntaxException, StorageException, FileAlreadyExistsException { + boolean failIfAlreadyExists) throws StorageException, FileAlreadyExistsException { if (failIfAlreadyExists && blobs.containsKey(blobName)) { throw new FileAlreadyExistsException(blobName); } 
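Aside from removing dead code, the AzureRepository hunk in the patch above also replaces logger.debug((org.apache.logging.log4j.util.Supplier) () -> new ParameterizedMessage(...)) with a plain lambda. For readers unfamiliar with the idiom, here is a minimal, self-contained sketch of lazy log4j logging; it assumes only log4j-api on the classpath, and the class and container names are illustrative rather than taken from the patch:

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;

public class LazyLoggingSketch {
    private static final Logger logger = LogManager.getLogger(LazyLoggingSketch.class);

    public static void main(String[] args) {
        final String container = "my-container";
        // The lambda is only invoked once the DEBUG level check has passed,
        // so the ParameterizedMessage is never built when DEBUG is disabled.
        logger.debug(() -> new ParameterizedMessage("using container [{}]", container));
    }
}

The point of the Supplier form is that message construction is skipped entirely for disabled levels, which matters on hot paths such as repository uploads; the explicit Supplier cast in the removed line was only ever there to help overload resolution, and the patch shows it is no longer needed.
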
From 1d09367a82fb511a72d80dc0b7f7d37865e126ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?G=C3=BCrkan=20Kaymak?= Date: Tue, 28 May 2019 10:37:04 +0300 Subject: [PATCH 20/40] Fixed ignoring name parameter for percolator queries (#42598) Closes #40405 --- .../percolator/PercolateQueryBuilder.java | 21 ++++++++++++--- .../PercolateQueryBuilderTests.java | 26 +++++++++++++++++++ 2 files changed, 43 insertions(+), 4 deletions(-) diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 44200823b6d..67db4ba9c0e 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -181,7 +181,7 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder documentSupplier) { + protected PercolateQueryBuilder(String field, String documentType, Supplier documentSupplier) { if (field == null) { throw new IllegalArgumentException("[field] is a required argument"); } @@ -519,8 +519,12 @@ public class PercolateQueryBuilder extends AbstractQueryBuilder docs) { RAMDirectory ramDirectory = new RAMDirectory(); try (IndexWriter indexWriter = new IndexWriter(ramDirectory, new IndexWriterConfig(analyzer))) { diff --git a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java index 6053a92b54a..5b4dc610900 100644 --- a/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java +++ b/modules/percolator/src/test/java/org/elasticsearch/percolator/PercolateQueryBuilderTests.java @@ -54,6 +54,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.function.Supplier; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; import static org.hamcrest.Matchers.equalTo; @@ -331,4 +332,29 @@ public class PercolateQueryBuilderTests extends AbstractQueryTestCase supplier = () -> new BytesArray("{\"test\": \"test\"}"); + String testName = "name1"; + QueryShardContext shardContext = createShardContext(); + PercolateQueryBuilder percolateQueryBuilder = new PercolateQueryBuilder(queryField, null, supplier); + percolateQueryBuilder.setName(testName); + + QueryBuilder rewrittenQueryBuilder = percolateQueryBuilder.doRewrite(shardContext); + + assertEquals(testName, ((PercolateQueryBuilder) rewrittenQueryBuilder).getQueryName()); + assertNotEquals(rewrittenQueryBuilder, percolateQueryBuilder); + } + } From 635ce0ca6d2af56563bc91245e56035e93601803 Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Tue, 28 May 2019 10:20:22 +0200 Subject: [PATCH 21/40] Mute AsyncTwoPhaseIndexerTests#testStateMachine() (#42610) Relates #42084 Relates #42609 --- .../xpack/core/indexing/AsyncTwoPhaseIndexerTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index 4249d7c61d0..95b3de5eb33 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -225,6 
+225,7 @@ public class AsyncTwoPhaseIndexerTests extends ESTestCase { } } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42084") public void testStateMachine() throws Exception { AtomicReference state = new AtomicReference<>(IndexerState.STOPPED); final ExecutorService executor = Executors.newFixedThreadPool(1); From 44bf784fe1a993fde79bb683208fa5508a7268cb Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 28 May 2019 10:46:22 +0200 Subject: [PATCH 22/40] Add Infrastructure to Run 3rd Party Repository Tests (#42586) (#42604) * Add Infrastructure to Run 3rd Party Repository Tests * Add infrastructure to run third party repository tests using our standard JUnit infrastructure * This is a prerequisite of #42189 --- plugins/repository-azure/build.gradle | 21 +++++ .../AzureStorageCleanupThirdPartyTests.java | 65 +++++++++++++ plugins/repository-gcs/build.gradle | 21 +++++ .../GoogleCloudStorageThirdPartyTests.java | 64 +++++++++++++ plugins/repository-s3/build.gradle | 43 ++++++++- .../s3/S3RepositoryThirdPartyTests.java | 73 +++++++++++++++ .../AbstractThirdPartyRepositoryTestCase.java | 91 +++++++++++++++++++ 7 files changed, 373 insertions(+), 5 deletions(-) create mode 100644 plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java create mode 100644 plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java create mode 100644 plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java create mode 100644 test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index a7c1af412d9..2669e4bf609 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -71,3 +71,24 @@ testClusters { keystore 'azure.client.integration_test.key', 'azure_key' } } + +String azureAccount = System.getenv("azure_storage_account") +String azureKey = System.getenv("azure_storage_key") +String azureContainer = System.getenv("azure_storage_container") +String azureBasePath = System.getenv("azure_storage_base_path") + +test { + exclude '**/AzureStorageCleanupThirdPartyTests.class' +} + +task thirdPartyTest(type: Test) { + include '**/AzureStorageCleanupThirdPartyTests.class' + systemProperty 'test.azure.account', azureAccount ? azureAccount : "" + systemProperty 'test.azure.key', azureKey ? azureKey : "" + systemProperty 'test.azure.container', azureContainer ? azureContainer : "" + systemProperty 'test.azure.base', azureBasePath ? azureBasePath : "" +} + +if (azureAccount || azureKey || azureContainer || azureBasePath) { + check.dependsOn(thirdPartyTest) +} diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java new file mode 100644 index 00000000000..596fdf73342 --- /dev/null +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureStorageCleanupThirdPartyTests.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.azure; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; + +import java.util.Collection; + +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class AzureStorageCleanupThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { + + @Override + protected Collection> getPlugins() { + return pluginList(AzureRepositoryPlugin.class); + } + + @Override + protected SecureSettings credentials() { + assertThat(System.getProperty("test.azure.account"), not(blankOrNullString())); + assertThat(System.getProperty("test.azure.key"), not(blankOrNullString())); + assertThat(System.getProperty("test.azure.container"), not(blankOrNullString())); + assertThat(System.getProperty("test.azure.base"), not(blankOrNullString())); + + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("azure.client.default.account", System.getProperty("test.azure.account")); + secureSettings.setString("azure.client.default.key", System.getProperty("test.azure.key")); + return secureSettings; + } + + @Override + protected void createRepository(String repoName) { + AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository(repoName) + .setType("azure") + .setSettings(Settings.builder() + .put("container", System.getProperty("test.azure.container")) + .put("base_path", System.getProperty("test.azure.base")) + ).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + } +} diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index e5af9081ca1..288ab3c99f1 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -1,3 +1,5 @@ +import java.nio.file.Files + /* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with @@ -122,3 +124,22 @@ check { // also execute the QA tests when testing the plugin dependsOn 'qa:google-cloud-storage:check' } + +String gcsServiceAccount = System.getenv("google_storage_service_account") +String gcsBucket = System.getenv("google_storage_bucket") +String gcsBasePath = System.getenv("google_storage_base_path") + +test { + exclude '**/GoogleCloudStorageThirdPartyTests.class' +} + +task thirdPartyTest(type: Test) { + include '**/GoogleCloudStorageThirdPartyTests.class' + systemProperty 'test.google.account', gcsServiceAccount ? 
Base64.encoder.encodeToString(Files.readAllBytes(file(gcsServiceAccount).toPath())) : "" + systemProperty 'test.google.bucket', gcsBucket ? gcsBucket : "" + systemProperty 'test.google.base', gcsBasePath ? gcsBasePath : "/" +} + +if (gcsServiceAccount || gcsBucket || gcsBasePath) { + check.dependsOn(thirdPartyTest) +} \ No newline at end of file diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java new file mode 100644 index 00000000000..06eb63ddd22 --- /dev/null +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageThirdPartyTests.java @@ -0,0 +1,64 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.repositories.gcs; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; + +import java.util.Base64; +import java.util.Collection; + +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class GoogleCloudStorageThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { + + @Override + protected Collection> getPlugins() { + return pluginList(GoogleCloudStoragePlugin.class); + } + + @Override + protected SecureSettings credentials() { + assertThat(System.getProperty("test.google.account"), not(blankOrNullString())); + assertThat(System.getProperty("test.google.bucket"), not(blankOrNullString())); + + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setFile("gcs.client.default.credentials_file", + Base64.getDecoder().decode(System.getProperty("test.google.account"))); + return secureSettings; + } + + @Override + protected void createRepository(final String repoName) { + AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") + .setType("gcs") + .setSettings(Settings.builder() + .put("bucket", System.getProperty("test.google.bucket")) + .put("base_path", System.getProperty("test.google.base", "/")) + ).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + } +} diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index d933bcef490..531215c1ace 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -75,6 +75,7 @@ test { // these are 
tested explicitly in separate test tasks exclude '**/*CredentialsTests.class' exclude '**/S3BlobStoreRepositoryTests.class' + exclude '**/S3RepositoryThirdPartyTests.class' } boolean useFixture = false @@ -134,6 +135,14 @@ if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath) { throw new IllegalArgumentException("not all options specified to run EC2/ECS tests are present") } +task thirdPartyTest(type: Test) { + include '**/S3RepositoryThirdPartyTests.class' + systemProperty 'test.s3.account', s3PermanentAccessKey + systemProperty 'test.s3.key', s3PermanentSecretKey + systemProperty 'test.s3.bucket', s3PermanentBucket + systemProperty 'test.s3.base', s3PermanentBasePath +} + if (useFixture) { apply plugin: 'elasticsearch.test.fixtures' task writeDockerFile { @@ -151,6 +160,32 @@ if (useFixture) { dependsOn(writeDockerFile) } + def minioAddress = { + int minioPort = postProcessFixture.ext."test.fixtures.minio-fixture.tcp.9000" + assert minioPort > 0 + return 'http://127.0.0.1:' + minioPort + } + + File minioAddressFile = new File(project.buildDir, 'generated-resources/s3Fixture.address') + + // We can't lazy evaluate a system property for the Minio address passed to JUnit so we write it to a resource file + // and pass its name instead. + task writeMinioAddress { + dependsOn tasks.bundlePlugin, tasks.postProcessFixture + outputs.file(minioAddressFile) + doLast { + file(minioAddressFile).text = "${ -> minioAddress.call() }" + } + } + + thirdPartyTest { + dependsOn writeMinioAddress + inputs.file(minioAddressFile) + systemProperty 'test.s3.endpoint', minioAddressFile.name + } + + BuildPlugin.requireDocker(tasks.thirdPartyTest) + task integTestMinio(type: RestIntegTestTask) { description = "Runs REST tests using the Minio repository." dependsOn tasks.bundlePlugin, tasks.postProcessFixture @@ -169,11 +204,7 @@ if (useFixture) { testClusters.integTestMinio { keystore 's3.client.integration_test_permanent.access_key', s3PermanentAccessKey keystore 's3.client.integration_test_permanent.secret_key', s3PermanentSecretKey - setting 's3.client.integration_test_permanent.endpoint', { - int minioPort = postProcessFixture.ext."test.fixtures.minio-fixture.tcp.9000" - assert minioPort > 0 - return 'http://127.0.0.1:' + minioPort - } + setting 's3.client.integration_test_permanent.endpoint', minioAddress plugin file(tasks.bundlePlugin.archiveFile) } @@ -191,6 +222,8 @@ if (useFixture) { } } +check.dependsOn(thirdPartyTest) + File parentFixtures = new File(project.buildDir, "fixtures") File s3FixtureFile = new File(parentFixtures, 's3Fixture.properties') diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java new file mode 100644 index 00000000000..88e29357548 --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3RepositoryThirdPartyTests.java @@ -0,0 +1,73 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories.s3; + +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.AbstractThirdPartyRepositoryTestCase; +import org.elasticsearch.test.StreamsUtils; + +import java.io.IOException; +import java.util.Collection; + +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.not; + +public class S3RepositoryThirdPartyTests extends AbstractThirdPartyRepositoryTestCase { + + @Override + protected Collection> getPlugins() { + return pluginList(S3RepositoryPlugin.class); + } + + @Override + protected SecureSettings credentials() { + assertThat(System.getProperty("test.s3.account"), not(blankOrNullString())); + assertThat(System.getProperty("test.s3.key"), not(blankOrNullString())); + assertThat(System.getProperty("test.s3.bucket"), not(blankOrNullString())); + + MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString("s3.client.default.access_key", System.getProperty("test.s3.account")); + secureSettings.setString("s3.client.default.secret_key", System.getProperty("test.s3.key")); + return secureSettings; + } + + @Override + protected void createRepository(String repoName) { + Settings.Builder settings = Settings.builder() + .put("bucket", System.getProperty("test.s3.bucket")) + .put("base_path", System.getProperty("test.s3.base", "/")); + final String endpointPath = System.getProperty("test.s3.endpoint"); + if (endpointPath != null) { + try { + settings = settings.put("endpoint", StreamsUtils.copyToStringFromClasspath("/" + endpointPath)); + } catch (IOException e) { + throw new AssertionError(e); + } + } + AcknowledgedResponse putRepositoryResponse = client().admin().cluster().preparePutRepository("test-repo") + .setType("s3") + .setSettings(settings).get(); + assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true)); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java new file mode 100644 index 00000000000..90c399a5af6 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/repositories/AbstractThirdPartyRepositoryTestCase.java @@ -0,0 +1,91 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.repositories; + +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.common.settings.SecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.snapshots.SnapshotState; +import org.elasticsearch.test.ESSingleNodeTestCase; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; + +public abstract class AbstractThirdPartyRepositoryTestCase extends ESSingleNodeTestCase { + + @Override + protected Settings nodeSettings() { + return Settings.builder() + .put(super.nodeSettings()) + .setSecureSettings(credentials()) + .build(); + } + + protected abstract SecureSettings credentials(); + + protected abstract void createRepository(String repoName); + + + public void testCreateSnapshot() { + createRepository("test-repo"); + + createIndex("test-idx-1"); + createIndex("test-idx-2"); + createIndex("test-idx-3"); + ensureGreen(); + + logger.info("--> indexing some data"); + for (int i = 0; i < 100; i++) { + client().prepareIndex("test-idx-1", "doc", Integer.toString(i)).setSource("foo", "bar" + i).get(); + client().prepareIndex("test-idx-2", "doc", Integer.toString(i)).setSource("foo", "bar" + i).get(); + client().prepareIndex("test-idx-3", "doc", Integer.toString(i)).setSource("foo", "bar" + i).get(); + } + client().admin().indices().prepareRefresh().get(); + + final String snapshotName = "test-snap-" + System.currentTimeMillis(); + + logger.info("--> snapshot"); + CreateSnapshotResponse createSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot("test-repo", snapshotName) + .setWaitForCompletion(true) + .setIndices("test-idx-*", "-test-idx-3") + .get(); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards())); + + assertThat(client().admin() + .cluster() + .prepareGetSnapshots("test-repo") + .setSnapshots(snapshotName) + .get() + .getSnapshots() + .get(0) + .state(), + equalTo(SnapshotState.SUCCESS)); + + assertTrue(client().admin() + .cluster() + .prepareDeleteSnapshot("test-repo", snapshotName) + .get() + .isAcknowledged()); + + } +} From 116b050cc6ff04e5d992ae36f2817ff193959383 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 28 May 2019 11:00:28 +0200 Subject: [PATCH 23/40] Cleanup Bulk Delete Exception Logging (#41693) (#42606) * Cleanup Bulk Delete Exception Logging * Follow up to #41368 * Collect all failed blob deletes and add them to the exception message * Remove logging of blob name list from caller exception logging --- .../gcs/GoogleCloudStorageBlobStore.java | 1 + .../repositories/s3/S3BlobContainer.java | 32 +++++++++++++------ .../blobstore/BlobStoreRepository.java | 12 +++---- 3 files changed, 30 insertions(+), 15 deletions(-) diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java 
b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 011adfe5403..c90d49bd73d 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -329,6 +329,7 @@ class GoogleCloudStorageBlobStore implements BlobStore { if (e != null) { throw new IOException("Exception when deleting blobs [" + failedBlobs + "]", e); } + assert failedBlobs.isEmpty(); } private static String buildKey(String keyPath, String s) { diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 652fa6a3601..c057d330da5 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -25,6 +25,7 @@ import com.amazonaws.services.s3.model.AmazonS3Exception; import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.MultiObjectDeleteException; import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.ObjectMetadata; import com.amazonaws.services.s3.model.PartETag; @@ -34,6 +35,7 @@ import com.amazonaws.services.s3.model.S3ObjectSummary; import com.amazonaws.services.s3.model.UploadPartRequest; import com.amazonaws.services.s3.model.UploadPartResult; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobMetaData; @@ -50,6 +52,8 @@ import java.nio.file.NoSuchFileException; import java.util.ArrayList; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import static org.elasticsearch.repositories.s3.S3Repository.MAX_FILE_SIZE; import static org.elasticsearch.repositories.s3.S3Repository.MAX_FILE_SIZE_USING_MULTIPART; @@ -127,12 +131,13 @@ class S3BlobContainer extends AbstractBlobContainer { if (blobNames.isEmpty()) { return; } + final Set outstanding = blobNames.stream().map(this::buildKey).collect(Collectors.toSet()); try (AmazonS3Reference clientReference = blobStore.clientReference()) { // S3 API only allows 1k blobs per delete so we split up the given blobs into requests of max. 
1k deletes final List deleteRequests = new ArrayList<>(); final List partition = new ArrayList<>(); - for (String blob : blobNames) { - partition.add(buildKey(blob)); + for (String key : outstanding) { + partition.add(key); if (partition.size() == MAX_BULK_DELETES ) { deleteRequests.add(bulkDelete(blobStore.bucket(), partition)); partition.clear(); @@ -144,23 +149,32 @@ class S3BlobContainer extends AbstractBlobContainer { SocketAccess.doPrivilegedVoid(() -> { AmazonClientException aex = null; for (DeleteObjectsRequest deleteRequest : deleteRequests) { + List keysInRequest = + deleteRequest.getKeys().stream().map(DeleteObjectsRequest.KeyVersion::getKey).collect(Collectors.toList()); try { clientReference.client().deleteObjects(deleteRequest); + outstanding.removeAll(keysInRequest); + } catch (MultiObjectDeleteException e) { + // We are sending quiet mode requests so we can't use the deleted keys entry on the exception and instead + // first remove all keys that were sent in the request and then add back those that ran into an exception. + outstanding.removeAll(keysInRequest); + outstanding.addAll( + e.getErrors().stream().map(MultiObjectDeleteException.DeleteError::getKey).collect(Collectors.toSet())); + aex = ExceptionsHelper.useOrSuppress(aex, e); } catch (AmazonClientException e) { - if (aex == null) { - aex = e; - } else { - aex.addSuppressed(e); - } + // The AWS client threw an unexpected exception and did not execute the request at all so we do not + // remove any keys from the outstanding deletes set. + aex = ExceptionsHelper.useOrSuppress(aex, e); } } if (aex != null) { throw aex; } }); - } catch (final AmazonClientException e) { - throw new IOException("Exception when deleting blobs [" + blobNames + "]", e); + } catch (Exception e) { + throw new IOException("Failed to delete blobs [" + outstanding + "]", e); } + assert outstanding.isEmpty(); } private static DeleteObjectsRequest bulkDelete(String bucket, List blobs) { diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 0d4c1623d2d..67ea26616ac 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -998,8 +998,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp try { blobContainer.deleteBlobsIgnoringIfNotExists(blobNames); } catch (IOException e) { - logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete index blobs {} during finalization", - snapshotId, shardId, blobNames), e); + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete index blobs during finalization", + snapshotId, shardId), e); throw e; } @@ -1014,8 +1014,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp try { blobContainer.deleteBlobsIgnoringIfNotExists(indexBlobs); } catch (IOException e) { - logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete index blobs {} during finalization", - snapshotId, shardId, indexBlobs), e); + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete index blobs during finalization", + snapshotId, shardId), e); throw e; } @@ -1027,8 +1027,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp try { blobContainer.deleteBlobsIgnoringIfNotExists(orphanedBlobs); } catch (IOException e) { -
logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete data blobs {} during finalization", - snapshotId, shardId, orphanedBlobs), e); + logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to delete data blobs during finalization", + snapshotId, shardId), e); } } catch (IOException e) { String message = From adb3574af830b04c2ce3b1b34d087c15ad9141db Mon Sep 17 00:00:00 2001 From: Daniel Mitterdorfer Date: Tue, 28 May 2019 12:25:18 +0200 Subject: [PATCH 24/40] Mute NodeTests (#42615) Relates #42577 Relates #42614 --- .../src/test/java/org/elasticsearch/node/NodeTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/server/src/test/java/org/elasticsearch/node/NodeTests.java b/server/src/test/java/org/elasticsearch/node/NodeTests.java index 6f0419421b8..a5653eb88e1 100644 --- a/server/src/test/java/org/elasticsearch/node/NodeTests.java +++ b/server/src/test/java/org/elasticsearch/node/NodeTests.java @@ -50,6 +50,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/42577") @LuceneTestCase.SuppressFileSystems(value = "ExtrasFS") public class NodeTests extends ESTestCase { From 00d665540a1cf06111e0c988289edd3e59d23b21 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 28 May 2019 12:44:40 +0200 Subject: [PATCH 25/40] Make unwrapCorrupt Check Suppressed Ex. (#41889) (#42605) * Make unwrapCorrupt Check Suppressed Ex. (#41889) * As discussed in #24800, we want to check for suppressed corruption-indicating exceptions here as well, to more reliably categorize corruption-related exceptions * Closes #24800, #41201 --- .../org/elasticsearch/ExceptionsHelper.java | 36 +++++++++++++++++-- .../elasticsearch/ExceptionsHelperTests.java | 28 +++++++++++++++ .../recovery/RecoverySourceHandlerTests.java | 4 ++- 3 files changed, 64 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java index e4269a375dd..94c4a273159 100644 --- a/server/src/main/java/org/elasticsearch/ExceptionsHelper.java +++ b/server/src/main/java/org/elasticsearch/ExceptionsHelper.java @@ -175,12 +175,42 @@ public final class ExceptionsHelper { return first; } + private static final List> CORRUPTION_EXCEPTIONS = + Arrays.asList(CorruptIndexException.class, IndexFormatTooOldException.class, IndexFormatTooNewException.class); + + /** + * Looks at the given Throwable and its cause(s), as well as any suppressed exceptions on the Throwable and its causes, + * and returns the first corruption indicating exception (as defined by {@link #CORRUPTION_EXCEPTIONS}) it finds.
+ * @param t Throwable + * @return Corruption indicating exception if one is found, otherwise {@code null} + */ public static IOException unwrapCorruption(Throwable t) { - return (IOException) unwrap(t, CorruptIndexException.class, - IndexFormatTooOldException.class, - IndexFormatTooNewException.class); + if (t != null) { + do { + for (Class clazz : CORRUPTION_EXCEPTIONS) { + if (clazz.isInstance(t)) { + return (IOException) t; + } + } + for (Throwable suppressed : t.getSuppressed()) { + IOException corruptionException = unwrapCorruption(suppressed); + if (corruptionException != null) { + return corruptionException; + } + } + } while ((t = t.getCause()) != null); + } + return null; } + /** + * Looks at the given Throwable and its cause(s) and returns the first Throwable that is of one of the given classes or {@code null} + * if no matching Throwable is found. Unlike {@link #unwrapCorruption} this method does only check the given Throwable and its causes + * but does not look at any suppressed exceptions. + * @param t Throwable + * @param clazzes Classes to look for + * @return Matching Throwable if one is found, otherwise {@code null} + */ public static Throwable unwrap(Throwable t, Class... clazzes) { if (t != null) { do { diff --git a/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java index 1d2a4ca6d5f..2de2f259e6f 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionsHelperTests.java @@ -20,6 +20,7 @@ package org.elasticsearch; import org.apache.commons.codec.DecoderException; +import org.apache.lucene.index.CorruptIndexException; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.ShardOperationFailedException; import org.elasticsearch.action.search.ShardSearchFailure; @@ -183,4 +184,31 @@ public class ExceptionsHelperTests extends ESTestCase { ShardOperationFailedException[] groupBy = ExceptionsHelper.groupBy(failures); assertThat(groupBy.length, equalTo(2)); } + + public void testUnwrapCorruption() { + final Throwable corruptIndexException = new CorruptIndexException("corrupt", "resource"); + assertThat(ExceptionsHelper.unwrapCorruption(corruptIndexException), equalTo(corruptIndexException)); + + final Throwable corruptionAsCause = new RuntimeException(corruptIndexException); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionAsCause), equalTo(corruptIndexException)); + + final Throwable corruptionSuppressed = new RuntimeException(); + corruptionSuppressed.addSuppressed(corruptIndexException); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionSuppressed), equalTo(corruptIndexException)); + + final Throwable corruptionSuppressedOnCause = new RuntimeException(new RuntimeException()); + corruptionSuppressedOnCause.getCause().addSuppressed(corruptIndexException); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionSuppressedOnCause), equalTo(corruptIndexException)); + + final Throwable corruptionCauseOnSuppressed = new RuntimeException(); + corruptionCauseOnSuppressed.addSuppressed(new RuntimeException(corruptIndexException)); + assertThat(ExceptionsHelper.unwrapCorruption(corruptionCauseOnSuppressed), equalTo(corruptIndexException)); + + assertThat(ExceptionsHelper.unwrapCorruption(new RuntimeException()), nullValue()); + assertThat(ExceptionsHelper.unwrapCorruption(new RuntimeException(new RuntimeException())), nullValue()); + + final Throwable withSuppressedException = new 
RuntimeException(); + withSuppressedException.addSuppressed(new RuntimeException()); + assertThat(ExceptionsHelper.unwrapCorruption(withSuppressedException), nullValue()); + } } diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 044c4b94e5d..b00e89575cc 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -440,10 +440,12 @@ public class RecoverySourceHandlerTests extends ESTestCase { handler.sendFiles(store, metas.toArray(new StoreFileMetaData[0]), () -> 0); fail("exception index"); } catch (RuntimeException ex) { - assertNull(ExceptionsHelper.unwrapCorruption(ex)); + final IOException unwrappedCorruption = ExceptionsHelper.unwrapCorruption(ex); if (throwCorruptedIndexException) { + assertNotNull(unwrappedCorruption); assertEquals(ex.getMessage(), "[File corruption occurred on recovery but checksums are ok]"); } else { + assertNull(unwrappedCorruption); assertEquals(ex.getMessage(), "boom"); } } catch (CorruptIndexException ex) { From 746a2f41fd8e27cbc7ad71ed4971679e99c97c9e Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 28 May 2019 12:25:53 +0100 Subject: [PATCH 26/40] Remove PRE_60_NODE_CHECKPOINT (#42531) This commit removes the obsolete `PRE_60_NODE_CHECKPOINT` constant for dealing with 5.x nodes' lack of sequence number support. Backport of #42527 --- .../TransportReplicationAction.java | 21 ++----- .../index/seqno/ReplicationTracker.java | 56 +++++-------------- .../index/seqno/SequenceNumbers.java | 4 -- .../elasticsearch/index/shard/IndexShard.java | 5 +- .../cluster/IndicesClusterStateService.java | 24 +------- .../index/engine/InternalEngineTests.java | 2 +- .../index/engine/NoOpEngineTests.java | 2 +- ...ReplicationTrackerRetentionLeaseTests.java | 36 ++++-------- .../index/seqno/ReplicationTrackerTests.java | 46 +++++++-------- .../index/shard/IndexShardTests.java | 9 ++- .../shard/PrimaryReplicaSyncerTests.java | 4 +- ...actIndicesClusterStateServiceTestCase.java | 3 +- .../ESIndexLevelReplicationTestCase.java | 6 +- .../index/shard/IndexShardTestCase.java | 10 ++-- .../engine/FollowEngineIndexShardTests.java | 2 +- 15 files changed, 74 insertions(+), 156 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java index ad66d192bf5..d981a314716 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/TransportReplicationAction.java @@ -981,28 +981,15 @@ public abstract class TransportReplicationAction< @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - if (in.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { - localCheckpoint = in.readZLong(); - } else { - // 5.x used to read empty responses, which don't really read anything off the stream, so just do nothing. 
- localCheckpoint = SequenceNumbers.PRE_60_NODE_CHECKPOINT; - } - if (in.getVersion().onOrAfter(Version.V_6_0_0_rc1)) { - globalCheckpoint = in.readZLong(); - } else { - globalCheckpoint = SequenceNumbers.PRE_60_NODE_CHECKPOINT; - } + localCheckpoint = in.readZLong(); + globalCheckpoint = in.readZLong(); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(Version.V_6_0_0_alpha1)) { - out.writeZLong(localCheckpoint); - } - if (out.getVersion().onOrAfter(Version.V_6_0_0_rc1)) { - out.writeZLong(globalCheckpoint); - } + out.writeZLong(localCheckpoint); + out.writeZLong(globalCheckpoint); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 8d16c5bcf6f..219d2096f7f 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -550,9 +550,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L "checkpoints map should always have an entry for the current shard"; // local checkpoints only set during primary mode - assert primaryMode || checkpoints.values().stream() - .allMatch(lcps -> lcps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO || - lcps.localCheckpoint == SequenceNumbers.PRE_60_NODE_CHECKPOINT); + assert primaryMode || checkpoints.values().stream().allMatch(lcps -> lcps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO); // global checkpoints for other shards only set during primary mode assert primaryMode @@ -561,9 +559,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L .stream() .filter(e -> e.getKey().equals(shardAllocationId) == false) .map(Map.Entry::getValue) - .allMatch(cps -> - (cps.globalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO - || cps.globalCheckpoint == SequenceNumbers.PRE_60_NODE_CHECKPOINT)); + .allMatch(cps -> cps.globalCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO); // relocation handoff can only occur in primary mode assert !handoffInProgress || primaryMode; @@ -642,7 +638,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L .stream() .filter(cps -> cps.inSync) .mapToLong(function) - .filter(v -> v != SequenceNumbers.PRE_60_NODE_CHECKPOINT && v != SequenceNumbers.UNASSIGNED_SEQ_NO)); + .filter(v -> v != SequenceNumbers.UNASSIGNED_SEQ_NO)); return value.isPresent() ? value.getAsLong() : SequenceNumbers.UNASSIGNED_SEQ_NO; } @@ -789,14 +785,12 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L /** * Notifies the tracker of the current allocation IDs in the cluster state. 
- * * @param applyingClusterStateVersion the cluster state version being applied when updating the allocation IDs from the master * @param inSyncAllocationIds the allocation IDs of the currently in-sync shard copies * @param routingTable the shard routing table - * @param pre60AllocationIds the allocation IDs of shards that are allocated to pre-6.0 nodes */ public synchronized void updateFromMaster(final long applyingClusterStateVersion, final Set inSyncAllocationIds, - final IndexShardRoutingTable routingTable, final Set pre60AllocationIds) { + final IndexShardRoutingTable routingTable) { assert invariant(); if (applyingClusterStateVersion > appliedClusterStateVersion) { // check that the master does not fabricate new in-sync entries out of thin air once we are in primary mode @@ -817,8 +811,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L final boolean inSync = inSyncAllocationIds.contains(initializingId); assert inSync == false : "update from master in primary mode has " + initializingId + " as in-sync but it does not exist locally"; - final long localCheckpoint = pre60AllocationIds.contains(initializingId) ? - SequenceNumbers.PRE_60_NODE_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; + final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; final long globalCheckpoint = localCheckpoint; checkpoints.put(initializingId, new CheckpointState(localCheckpoint, globalCheckpoint, inSync, inSync)); } @@ -829,8 +822,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L } else { for (String initializingId : initializingAllocationIds) { if (shardAllocationId.equals(initializingId) == false) { - final long localCheckpoint = pre60AllocationIds.contains(initializingId) ? - SequenceNumbers.PRE_60_NODE_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; + final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; final long globalCheckpoint = localCheckpoint; checkpoints.put(initializingId, new CheckpointState(localCheckpoint, globalCheckpoint, false, false)); } @@ -842,8 +834,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L checkpointState.inSync = true; checkpointState.tracked = true; } else { - final long localCheckpoint = pre60AllocationIds.contains(inSyncId) ? 
- SequenceNumbers.PRE_60_NODE_CHECKPOINT : SequenceNumbers.UNASSIGNED_SEQ_NO; + final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; final long globalCheckpoint = localCheckpoint; checkpoints.put(inSyncId, new CheckpointState(localCheckpoint, globalCheckpoint, true, true)); } @@ -931,13 +922,9 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L } private boolean updateLocalCheckpoint(String allocationId, CheckpointState cps, long localCheckpoint) { - // a local checkpoint of PRE_60_NODE_CHECKPOINT cannot be overridden - assert cps.localCheckpoint != SequenceNumbers.PRE_60_NODE_CHECKPOINT || - localCheckpoint == SequenceNumbers.PRE_60_NODE_CHECKPOINT : - "pre-6.0 shard copy " + allocationId + " unexpected to send valid local checkpoint " + localCheckpoint; - // a local checkpoint for a shard copy should be a valid sequence number or the pre-6.0 sequence number indicator - assert localCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO : - "invalid local checkpoint for shard copy [" + allocationId + "]"; + // a local checkpoint for a shard copy should be a valid sequence number + assert localCheckpoint >= SequenceNumbers.NO_OPS_PERFORMED : + "invalid local checkpoint [" + localCheckpoint + "] for shard copy [" + allocationId + "]"; if (localCheckpoint > cps.localCheckpoint) { logger.trace("updated local checkpoint of [{}] from [{}] to [{}]", allocationId, cps.localCheckpoint, localCheckpoint); cps.localCheckpoint = localCheckpoint; @@ -996,8 +983,6 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L if (cps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO) { // unassigned in-sync replica return fallback; - } else if (cps.localCheckpoint == SequenceNumbers.PRE_60_NODE_CHECKPOINT) { - // 5.x replica, ignore for global checkpoint calculation } else { minLocalCheckpoint = Math.min(cps.localCheckpoint, minLocalCheckpoint); } @@ -1069,18 +1054,11 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L handoffInProgress = false; relocated = true; // forget all checkpoint information except for global checkpoint of current shard - checkpoints.entrySet().stream().forEach(e -> { - final CheckpointState cps = e.getValue(); - if (cps.localCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO && - cps.localCheckpoint != SequenceNumbers.PRE_60_NODE_CHECKPOINT) { - cps.localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - } - if (e.getKey().equals(shardAllocationId) == false) { + checkpoints.forEach((key, cps) -> { + cps.localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; + if (key.equals(shardAllocationId) == false) { // don't throw global checkpoint information of current shard away - if (cps.globalCheckpoint != SequenceNumbers.UNASSIGNED_SEQ_NO && - cps.globalCheckpoint != SequenceNumbers.PRE_60_NODE_CHECKPOINT) { - cps.globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - } + cps.globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; } }); assert invariant(); @@ -1117,17 +1095,13 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L assert primaryMode == false; final long lastAppliedClusterStateVersion = appliedClusterStateVersion; final Set inSyncAllocationIds = new HashSet<>(); - final Set pre60AllocationIds = new HashSet<>(); checkpoints.entrySet().forEach(entry -> { if (entry.getValue().inSync) { inSyncAllocationIds.add(entry.getKey()); } - if (entry.getValue().getLocalCheckpoint() == SequenceNumbers.PRE_60_NODE_CHECKPOINT) { - pre60AllocationIds.add(entry.getKey()); - } 
}); final IndexShardRoutingTable lastAppliedRoutingTable = routingTable; - return () -> updateFromMaster(lastAppliedClusterStateVersion, inSyncAllocationIds, lastAppliedRoutingTable, pre60AllocationIds); + return () -> updateFromMaster(lastAppliedClusterStateVersion, inSyncAllocationIds, lastAppliedRoutingTable); } /** diff --git a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java index 6336e83338f..87257a97076 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/SequenceNumbers.java @@ -28,10 +28,6 @@ public class SequenceNumbers { public static final String LOCAL_CHECKPOINT_KEY = "local_checkpoint"; public static final String MAX_SEQ_NO = "max_seq_no"; - /** - * Represents a checkpoint coming from a pre-6.0 node - */ - public static final long PRE_60_NODE_CHECKPOINT = -3L; /** * Represents an unassigned sequence number (e.g., can be used on primary operations before they are executed). */ diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index f25a6f0716d..fdd95614756 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -433,8 +433,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl final BiConsumer> primaryReplicaSyncer, final long applyingClusterStateVersion, final Set inSyncAllocationIds, - final IndexShardRoutingTable routingTable, - final Set pre60AllocationIds) throws IOException { + final IndexShardRoutingTable routingTable) throws IOException { final ShardRouting currentRouting; synchronized (mutex) { currentRouting = this.shardRouting; @@ -453,7 +452,7 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } if (newRouting.primary()) { - replicationTracker.updateFromMaster(applyingClusterStateVersion, inSyncAllocationIds, routingTable, pre60AllocationIds); + replicationTracker.updateFromMaster(applyingClusterStateVersion, inSyncAllocationIds, routingTable); } if (state == IndexShardState.POST_RECOVERY && newRouting.active()) { diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index 821e095fc20..5e8e8e6ec95 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -24,7 +24,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.LockObtainFailedException; import org.elasticsearch.ResourceAlreadyExistsException; -import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.replication.ReplicationResponse; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -35,7 +34,6 @@ import org.elasticsearch.cluster.action.shard.ShardStateAction; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; -import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import 
org.elasticsearch.cluster.routing.RecoverySource.Type; import org.elasticsearch.cluster.routing.RoutingNode; @@ -94,8 +92,6 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; import java.util.function.BiConsumer; import java.util.function.Consumer; -import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.CLOSED; import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED; @@ -630,21 +626,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple primaryTerm = indexMetaData.primaryTerm(shard.shardId().id()); final Set inSyncIds = indexMetaData.inSyncAllocationIds(shard.shardId().id()); final IndexShardRoutingTable indexShardRoutingTable = routingTable.shardRoutingTable(shardRouting.shardId()); - final Set pre60AllocationIds = indexShardRoutingTable.assignedShards() - .stream() - .flatMap(shr -> { - if (shr.relocating()) { - return Stream.of(shr, shr.getTargetRelocatingShard()); - } else { - return Stream.of(shr); - } - }) - .filter(shr -> nodes.get(shr.currentNodeId()).getVersion().before(Version.V_6_0_0_alpha1)) - .map(ShardRouting::allocationId) - .map(AllocationId::getId) - .collect(Collectors.toSet()); shard.updateShardState(shardRouting, primaryTerm, primaryReplicaSyncer::resync, clusterState.version(), - inSyncIds, indexShardRoutingTable, pre60AllocationIds); + inSyncIds, indexShardRoutingTable); } catch (Exception e) { failAndRemoveShard(shardRouting, true, "failed updating shard routing entry", e, clusterState); return; @@ -810,7 +793,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple * - Updates and persists the new routing value. * - Updates the primary term if this shard is a primary. * - Updates the allocation ids that are tracked by the shard if it is a primary. - * See {@link ReplicationTracker#updateFromMaster(long, Set, IndexShardRoutingTable, Set)} for details. + * See {@link ReplicationTracker#updateFromMaster(long, Set, IndexShardRoutingTable)} for details. 
* * @param shardRouting the new routing entry * @param primaryTerm the new primary term @@ -826,8 +809,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple BiConsumer> primaryReplicaSyncer, long applyingClusterStateVersion, Set inSyncAllocationIds, - IndexShardRoutingTable routingTable, - Set pre60AllocationIds) throws IOException; + IndexShardRoutingTable routingTable) throws IOException; } public interface AllocatedIndex extends Iterable, IndexComponent { diff --git a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index b9020d423ff..703f193a412 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -2281,7 +2281,7 @@ public class InternalEngineTests extends EngineTestCase { ReplicationTracker gcpTracker = (ReplicationTracker) initialEngine.config().getGlobalCheckpointSupplier(); gcpTracker.updateFromMaster(1L, new HashSet<>(Arrays.asList(primary.allocationId().getId(), replica.allocationId().getId())), - new IndexShardRoutingTable.Builder(shardId).addShard(primary).addShard(replica).build(), Collections.emptySet()); + new IndexShardRoutingTable.Builder(shardId).addShard(primary).addShard(replica).build()); gcpTracker.activatePrimaryMode(primarySeqNo); for (int op = 0; op < opCount; op++) { final String id; diff --git a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java index 3a857a20468..f03500e6e12 100644 --- a/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java +++ b/server/src/test/java/org/elasticsearch/index/engine/NoOpEngineTests.java @@ -75,7 +75,7 @@ public class NoOpEngineTests extends EngineTestCase { ShardRouting routing = TestShardRouting.newShardRouting("test", shardId.id(), "node", null, true, ShardRoutingState.STARTED, allocationId); IndexShardRoutingTable table = new IndexShardRoutingTable.Builder(shardId).addShard(routing).build(); - tracker.updateFromMaster(1L, Collections.singleton(allocationId.getId()), table, Collections.emptySet()); + tracker.updateFromMaster(1L, Collections.singleton(allocationId.getId()), table); tracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); for (int i = 0; i < docs; i++) { ParsedDocument doc = testParsedDocument("" + i, null, testDocumentWithTextField(), B_1, null); diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java index 0e7cbaa42d1..2334cb43308 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerRetentionLeaseTests.java @@ -71,8 +71,7 @@ public class ReplicationTrackerRetentionLeaseTests extends ReplicationTrackerTes replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final int length = randomIntBetween(0, 8); final long[] minimumRetainingSequenceNumbers = new long[length]; @@ -113,8 +112,7 @@ public class 
ReplicationTrackerRetentionLeaseTests extends ReplicationTrackerTes replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final String id = randomAlphaOfLength(8); final long retainingSequenceNumber = randomNonNegativeLong(); @@ -142,8 +140,7 @@ public class ReplicationTrackerRetentionLeaseTests extends ReplicationTrackerTes replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final String id = randomAlphaOfLength(8); final RetentionLeaseNotFoundException e = expectThrows( @@ -179,8 +176,7 @@ public class ReplicationTrackerRetentionLeaseTests extends ReplicationTrackerTes replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final int length = randomIntBetween(0, 8); @@ -214,8 +210,7 @@ public class ReplicationTrackerRetentionLeaseTests extends ReplicationTrackerTes replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final int length = randomIntBetween(0, 8); final long[] minimumRetainingSequenceNumbers = new long[length]; @@ -265,8 +260,7 @@ public class ReplicationTrackerRetentionLeaseTests extends ReplicationTrackerTes replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final String id = randomAlphaOfLength(8); final RetentionLeaseNotFoundException e = expectThrows( @@ -302,8 +296,7 @@ public class ReplicationTrackerRetentionLeaseTests extends ReplicationTrackerTes replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final int length = randomIntBetween(0, 8); @@ -354,8 +347,7 @@ public class ReplicationTrackerRetentionLeaseTests extends ReplicationTrackerTes replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); if (primaryMode) { replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); } @@ -427,8 +419,7 @@ public class ReplicationTrackerRetentionLeaseTests extends ReplicationTrackerTes replicationTracker.updateFromMaster( 
randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); final int length = randomIntBetween(0, 8); final List retentionLeasesCollection = new ArrayList<>(length); long primaryTerm = 1; @@ -481,8 +472,7 @@ public class ReplicationTrackerRetentionLeaseTests extends ReplicationTrackerTes replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final int length = randomIntBetween(0, 8); for (int i = 0; i < length; i++) { @@ -515,8 +505,7 @@ public class ReplicationTrackerRetentionLeaseTests extends ReplicationTrackerTes replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final int length = randomIntBetween(0, 8); for (int i = 0; i < length; i++) { @@ -564,8 +553,7 @@ public class ReplicationTrackerRetentionLeaseTests extends ReplicationTrackerTes replicationTracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(allocationId.getId()), - routingTable(Collections.emptySet(), allocationId), - Collections.emptySet()); + routingTable(Collections.emptySet(), allocationId)); replicationTracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED); final int length = randomIntBetween(0, 8); for (int i = 0; i < length; i++) { diff --git a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java index 037d2130b5c..10e84e6ec53 100644 --- a/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/elasticsearch/index/seqno/ReplicationTrackerTests.java @@ -120,7 +120,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { logger.info(" - [{}], local checkpoint [{}], [{}]", aId, allocations.get(aId), type); }); - tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId), emptySet()); + tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); assertThat(tracker.getReplicationGroup().getReplicationTargets().size(), equalTo(1)); initializing.forEach(aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED)); @@ -147,7 +147,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { Set newInitializing = new HashSet<>(initializing); newInitializing.add(extraId); - tracker.updateFromMaster(initialClusterStateVersion + 1, ids(active), routingTable(newInitializing, primaryId), emptySet()); + tracker.updateFromMaster(initialClusterStateVersion + 1, ids(active), routingTable(newInitializing, primaryId)); tracker.initiateTracking(extraId.getId()); @@ -187,7 +187,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { final AllocationId primaryId = active.iterator().next(); final AllocationId replicaId = initializing.iterator().next(); final ReplicationTracker 
tracker = newTracker(primaryId); - tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId), emptySet()); + tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); final long localCheckpoint = randomLongBetween(0, Long.MAX_VALUE - 1); tracker.activatePrimaryMode(localCheckpoint); tracker.initiateTracking(replicaId.getId()); @@ -229,7 +229,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { assigned.putAll(initializing); AllocationId primaryId = active.keySet().iterator().next(); final ReplicationTracker tracker = newTracker(primaryId); - tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId), emptySet()); + tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); randomSubsetOf(initializing.keySet()).forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); final AllocationId missingActiveID = randomFrom(active.keySet()); @@ -256,7 +256,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { AllocationId primaryId = active.keySet().iterator().next(); final ReplicationTracker tracker = newTracker(primaryId); - tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId), emptySet()); + tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); randomSubsetOf(randomIntBetween(1, initializing.size() - 1), initializing.keySet()).forEach(aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED)); @@ -278,7 +278,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { final Map nonApproved = randomAllocationsWithLocalCheckpoints(1, 5); final AllocationId primaryId = active.keySet().iterator().next(); final ReplicationTracker tracker = newTracker(primaryId); - tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId), emptySet()); + tracker.updateFromMaster(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); initializing.keySet().forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); nonApproved.keySet().forEach(k -> @@ -313,7 +313,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { allocations.putAll(initializingToBeRemoved); } final ReplicationTracker tracker = newTracker(primaryId); - tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId), emptySet()); + tracker.updateFromMaster(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); if (randomBoolean()) { initializingToStay.keySet().forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); @@ -329,16 +329,14 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { tracker.updateFromMaster( initialClusterStateVersion + 1, ids(activeToStay.keySet()), - routingTable(initializingToStay.keySet(), primaryId), - emptySet()); + routingTable(initializingToStay.keySet(), primaryId)); allocations.forEach((aid, ckp) -> 
updateLocalCheckpoint(tracker, aid.getId(), ckp + 10L)); } else { allocations.forEach((aid, ckp) -> updateLocalCheckpoint(tracker, aid.getId(), ckp + 10L)); tracker.updateFromMaster( initialClusterStateVersion + 2, ids(activeToStay.keySet()), - routingTable(initializingToStay.keySet(), primaryId), - emptySet()); + routingTable(initializingToStay.keySet(), primaryId)); } final long checkpoint = Stream.concat(activeToStay.values().stream(), initializingToStay.values().stream()) @@ -357,7 +355,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { final ReplicationTracker tracker = newTracker(inSyncAllocationId); final long clusterStateVersion = randomNonNegativeLong(); tracker.updateFromMaster(clusterStateVersion, Collections.singleton(inSyncAllocationId.getId()), - routingTable(Collections.singleton(trackingAllocationId), inSyncAllocationId), emptySet()); + routingTable(Collections.singleton(trackingAllocationId), inSyncAllocationId)); tracker.activatePrimaryMode(globalCheckpoint); final Thread thread = new Thread(() -> { try { @@ -397,7 +395,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { } else { // master changes its mind and cancels the allocation tracker.updateFromMaster(clusterStateVersion + 1, Collections.singleton(inSyncAllocationId.getId()), - routingTable(emptySet(), inSyncAllocationId), emptySet()); + routingTable(emptySet(), inSyncAllocationId)); barrier.await(); assertTrue(complete.get()); assertNull(tracker.getTrackedLocalCheckpointForShard(trackingAllocationId.getId())); @@ -421,7 +419,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { final AllocationId trackingAllocationId = AllocationId.newInitializing(); final ReplicationTracker tracker = newTracker(inSyncAllocationId); tracker.updateFromMaster(randomNonNegativeLong(), Collections.singleton(inSyncAllocationId.getId()), - routingTable(Collections.singleton(trackingAllocationId), inSyncAllocationId), emptySet()); + routingTable(Collections.singleton(trackingAllocationId), inSyncAllocationId)); tracker.activatePrimaryMode(globalCheckpoint); final Thread thread = new Thread(() -> { try { @@ -470,7 +468,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { AllocationId primaryId = activeAllocationIds.iterator().next(); IndexShardRoutingTable routingTable = routingTable(initializingIds, primaryId); final ReplicationTracker tracker = newTracker(primaryId); - tracker.updateFromMaster(initialClusterStateVersion, ids(activeAllocationIds), routingTable, emptySet()); + tracker.updateFromMaster(initialClusterStateVersion, ids(activeAllocationIds), routingTable); tracker.activatePrimaryMode(NO_OPS_PERFORMED); assertThat(tracker.getReplicationGroup().getInSyncAllocationIds(), equalTo(ids(activeAllocationIds))); assertThat(tracker.getReplicationGroup().getRoutingTable(), equalTo(routingTable)); @@ -500,7 +498,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { final Set newInitializingAllocationIds = initializingIds.stream().filter(a -> !removingInitializingAllocationIds.contains(a)).collect(Collectors.toSet()); routingTable = routingTable(newInitializingAllocationIds, primaryId); - tracker.updateFromMaster(initialClusterStateVersion + 1, ids(newActiveAllocationIds), routingTable, emptySet()); + tracker.updateFromMaster(initialClusterStateVersion + 1, ids(newActiveAllocationIds), routingTable); assertTrue(newActiveAllocationIds.stream().allMatch(a -> 
tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); assertTrue(removingActiveAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()) == null)); assertTrue(newInitializingAllocationIds.stream().noneMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); @@ -517,8 +515,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { tracker.updateFromMaster( initialClusterStateVersion + 2, ids(newActiveAllocationIds), - routingTable(newInitializingAllocationIds, primaryId), - emptySet()); + routingTable(newInitializingAllocationIds, primaryId)); assertTrue(newActiveAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); assertTrue( newActiveAllocationIds @@ -565,8 +562,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { tracker.updateFromMaster( initialClusterStateVersion + 3, ids(newActiveAllocationIds), - routingTable(newInitializingAllocationIds, primaryId), - emptySet()); + routingTable(newInitializingAllocationIds, primaryId)); final CyclicBarrier barrier = new CyclicBarrier(2); final Thread thread = new Thread(() -> { try { @@ -604,8 +600,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { tracker.updateFromMaster( initialClusterStateVersion + 4, ids(newActiveAllocationIds), - routingTable(newInitializingAllocationIds, primaryId), - emptySet()); + routingTable(newInitializingAllocationIds, primaryId)); assertTrue(tracker.getTrackedLocalCheckpointForShard(newSyncingAllocationId.getId()).inSync); assertFalse(tracker.pendingInSync.contains(newSyncingAllocationId.getId())); } @@ -633,8 +628,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { tracker.updateFromMaster( randomNonNegativeLong(), Collections.singleton(active.getId()), - routingTable(Collections.singleton(initializing), active), - emptySet()); + routingTable(Collections.singleton(initializing), active)); tracker.activatePrimaryMode(activeLocalCheckpoint); final int nextActiveLocalCheckpoint = randomIntBetween(activeLocalCheckpoint + 1, Integer.MAX_VALUE); final Thread activeThread = new Thread(() -> { @@ -835,7 +829,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { final AllocationId initializing = AllocationId.newInitializing(); final ReplicationTracker tracker = newTracker(active); tracker.updateFromMaster(randomNonNegativeLong(), Collections.singleton(active.getId()), - routingTable(Collections.singleton(initializing), active), emptySet()); + routingTable(Collections.singleton(initializing), active)); tracker.activatePrimaryMode(NO_OPS_PERFORMED); expectThrows(IllegalStateException.class, () -> tracker.initiateTracking(randomAlphaOfLength(10))); @@ -863,7 +857,7 @@ public class ReplicationTrackerTests extends ReplicationTrackerTestCase { } public void apply(ReplicationTracker gcp) { - gcp.updateFromMaster(version, ids(inSyncIds), routingTable, Collections.emptySet()); + gcp.updateFromMaster(version, ids(inSyncIds), routingTable); } } diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index 19a1ce4054a..5febd735f8f 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -603,7 +603,7 @@ public class IndexShardTests extends IndexShardTestCase { replicaRouting.allocationId()); 
indexShard.updateShardState(primaryRouting, newPrimaryTerm, (shard, listener) -> {}, 0L, Collections.singleton(primaryRouting.allocationId().getId()), - new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build(), Collections.emptySet()); + new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build()); /* * This operation completing means that the delay operation executed as part of increasing the primary term has completed and the @@ -656,8 +656,8 @@ public class IndexShardTests extends IndexShardTestCase { latch.countDown(); }, 0L, Collections.singleton(indexShard.routingEntry().allocationId().getId()), - new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build(), - Collections.emptySet()); + new IndexShardRoutingTable.Builder(indexShard.shardId()).addShard(primaryRouting).build() + ); latch.await(); assertThat(indexShard.getActiveOperationsCount(), isOneOf(0, IndexShard.OPERATIONS_BLOCKED)); if (randomBoolean()) { @@ -1208,8 +1208,7 @@ public class IndexShardTests extends IndexShardTestCase { (s, r) -> resyncLatch.countDown(), 1L, Collections.singleton(newRouting.allocationId().getId()), - new IndexShardRoutingTable.Builder(newRouting.shardId()).addShard(newRouting).build(), - Collections.emptySet()); + new IndexShardRoutingTable.Builder(newRouting.shardId()).addShard(newRouting).build()); resyncLatch.await(); assertThat(indexShard.getLocalCheckpoint(), equalTo(maxSeqNo)); assertThat(indexShard.seqNoStats().getMaxSeqNo(), equalTo(maxSeqNo)); diff --git a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java index c7d59fdb7c2..e0825445bb8 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/PrimaryReplicaSyncerTests.java @@ -99,7 +99,7 @@ public class PrimaryReplicaSyncerTests extends IndexShardTestCase { String allocationId = shard.routingEntry().allocationId().getId(); shard.updateShardState(shard.routingEntry(), shard.getPendingPrimaryTerm(), null, 1000L, Collections.singleton(allocationId), - new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build(), Collections.emptySet()); + new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build()); shard.updateLocalCheckpointForShard(allocationId, globalCheckPoint); assertEquals(globalCheckPoint, shard.getGlobalCheckpoint()); @@ -159,7 +159,7 @@ public class PrimaryReplicaSyncerTests extends IndexShardTestCase { String allocationId = shard.routingEntry().allocationId().getId(); shard.updateShardState(shard.routingEntry(), shard.getPendingPrimaryTerm(), null, 1000L, Collections.singleton(allocationId), - new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build(), Collections.emptySet()); + new IndexShardRoutingTable.Builder(shard.shardId()).addShard(shard.routingEntry()).build()); CountDownLatch syncCalledLatch = new CountDownLatch(1); PlainActionFuture fut = new PlainActionFuture() { diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index 9b6cae43081..46b4d7fd035 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java 
+++ b/server/src/test/java/org/elasticsearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -355,8 +355,7 @@ public abstract class AbstractIndicesClusterStateServiceTestCase extends ESTestC BiConsumer> primaryReplicaSyncer, long applyingClusterStateVersion, Set inSyncAllocationIds, - IndexShardRoutingTable routingTable, - Set pre60AllocationIds) throws IOException { + IndexShardRoutingTable routingTable) throws IOException { failRandomly(); assertThat(this.shardId(), equalTo(shardRouting.shardId())); assertTrue("current: " + this.shardRouting + ", got: " + shardRouting, this.shardRouting.isSameAllocation(shardRouting)); diff --git a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java index d88cdf488fb..90fa78ae7e7 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/replication/ESIndexLevelReplicationTestCase.java @@ -293,7 +293,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase ShardRouting startedRoutingEntry = ShardRoutingHelper.moveToStarted(primary.routingEntry()); IndexShardRoutingTable routingTable = routingTable(shr -> shr == primary.routingEntry() ? startedRoutingEntry : shr); primary.updateShardState(startedRoutingEntry, primary.getPendingPrimaryTerm(), null, - currentClusterStateVersion.incrementAndGet(), activeIds, routingTable, Collections.emptySet()); + currentClusterStateVersion.incrementAndGet(), activeIds, routingTable); for (final IndexShard replica : replicas) { recoverReplica(replica); } @@ -385,7 +385,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase IndexShardRoutingTable routingTable = routingTable(shr -> shr == replica.routingEntry() ? 
primaryRouting : shr); primary.updateShardState(primaryRouting, newTerm, primaryReplicaSyncer, currentClusterStateVersion.incrementAndGet(), - activeIds(), routingTable, Collections.emptySet()); + activeIds(), routingTable); } private synchronized Set activeIds() { @@ -520,7 +520,7 @@ public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(), - activeIds(), routingTable(Function.identity()), Collections.emptySet()); + activeIds(), routingTable(Function.identity())); } private synchronized void computeReplicationTargets() { diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index d1b66fe64e4..2a2176f1c10 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -548,7 +548,7 @@ public abstract class IndexShardTestCase extends ESTestCase { .addShard(shardRouting) .build(); shard.updateShardState(shardRouting, shard.getPendingPrimaryTerm(), null, currentClusterStateVersion.incrementAndGet(), - inSyncIds, newRoutingTable, Collections.emptySet()); + inSyncIds, newRoutingTable); } protected void recoveryEmptyReplica(IndexShard replica, boolean startReplica) throws IOException { @@ -633,7 +633,7 @@ public abstract class IndexShardTestCase extends ESTestCase { new AsyncRecoveryTarget(recoveryTarget, threadPool.generic()), request, Math.toIntExact(ByteSizeUnit.MB.toBytes(1)), between(1, 8)); primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null, - currentClusterStateVersion.incrementAndGet(), inSyncIds, routingTable, Collections.emptySet()); + currentClusterStateVersion.incrementAndGet(), inSyncIds, routingTable); PlainActionFuture future = new PlainActionFuture<>(); recovery.recoverToTarget(future); @@ -658,9 +658,9 @@ public abstract class IndexShardTestCase extends ESTestCase { inSyncIdsWithReplica.add(replica.routingEntry().allocationId().getId()); // update both primary and replica shard state primary.updateShardState(primary.routingEntry(), primary.getPendingPrimaryTerm(), null, - currentClusterStateVersion.incrementAndGet(), inSyncIdsWithReplica, newRoutingTable, Collections.emptySet()); + currentClusterStateVersion.incrementAndGet(), inSyncIdsWithReplica, newRoutingTable); replica.updateShardState(replica.routingEntry().moveToStarted(), replica.getPendingPrimaryTerm(), null, - currentClusterStateVersion.get(), inSyncIdsWithReplica, newRoutingTable, Collections.emptySet()); + currentClusterStateVersion.get(), inSyncIdsWithReplica, newRoutingTable); } @@ -685,7 +685,7 @@ public abstract class IndexShardTestCase extends ESTestCase { (is, listener) -> listener.onResponse(new PrimaryReplicaSyncer.ResyncTask(1, "type", "action", "desc", null, Collections.emptyMap())), currentClusterStateVersion.incrementAndGet(), - inSyncIds, newRoutingTable, Collections.emptySet()); + inSyncIds, newRoutingTable); } private Store.MetadataSnapshot getMetadataSnapshotOrEmpty(IndexShard replica) throws IOException { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java index 6d1811198cb..f8260f2fce5 100644 
--- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/index/engine/FollowEngineIndexShardTests.java @@ -80,7 +80,7 @@ public class FollowEngineIndexShardTests extends IndexShardTestCase { replicaRouting.allocationId()); indexShard.updateShardState(primaryRouting, indexShard.getOperationPrimaryTerm() + 1, (shard, listener) -> {}, 0L, Collections.singleton(primaryRouting.allocationId().getId()), - new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build(), Collections.emptySet()); + new IndexShardRoutingTable.Builder(primaryRouting.shardId()).addShard(primaryRouting).build()); final CountDownLatch latch = new CountDownLatch(1); ActionListener actionListener = ActionListener.wrap(releasable -> { From 5598647922cc439ed090caccff44cc8d1854d6ad Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 28 May 2019 13:35:01 +0200 Subject: [PATCH 27/40] Reset state recovery after successful recovery (#42576) The problem this commit addresses is that state recovery is not reset on a node that then becomes master with a cluster state that has a state not recovered flag in it. The situation that was observed in a failed test run of MinimumMasterNodesIT.testThreeNodesNoMasterBlock (see below) is that we have 3 master nodes (node_t0, node_t1, node_t2), two of them are shut down (node_t2 remains); when the first one comes back (renamed to node_t4), it becomes leader in term 2 and sends state (with state_not_recovered_block) to node_t2, which accepts. node_t2 becomes leader in term 3, and as it was previously leader in term 1 and successfully completed state recovery, it never retries state recovery in term 3. Closes #39172 --- .../elasticsearch/cluster/ClusterState.java | 3 +- .../cluster/coordination/Coordinator.java | 1 - .../elasticsearch/gateway/GatewayService.java | 11 ++-- .../coordination/CoordinatorTests.java | 61 ++++++++++++++++++- 4 files changed, 67 insertions(+), 9 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index c77d3f01e5f..398dd908945 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -325,7 +325,8 @@ public class ClusterState implements ToXContentFragment, Diffable public String toString() { StringBuilder sb = new StringBuilder(); final String TAB = " "; - sb.append("cluster uuid: ").append(metaData.clusterUUID()).append("\n"); + sb.append("cluster uuid: ").append(metaData.clusterUUID()) + .append(" [committed: ").append(metaData.clusterUUIDCommitted()).append("]").append("\n"); sb.append("version: ").append(version).append("\n"); sb.append("state uuid: ").append(stateUUID).append("\n"); sb.append("from_diff: ").append(wasReadFromDiff).append("\n"); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java index af9a38bec49..bf6ed67f874 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java @@ -729,7 +729,6 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery assert followersChecker.getFastResponseState().term == getCurrentTerm() :
followersChecker.getFastResponseState(); assert followersChecker.getFastResponseState().mode == getMode() : followersChecker.getFastResponseState(); assert (applierState.nodes().getMasterNodeId() == null) == applierState.blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID); - assert applierState.nodes().getMasterNodeId() == null || applierState.metaData().clusterUUIDCommitted(); assert preVoteCollector.getPreVoteResponse().equals(getPreVoteResponse()) : preVoteCollector + " vs " + getPreVoteResponse(); diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java index 85a9c448991..fc682f1906b 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -86,7 +86,7 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste private final Runnable recoveryRunnable; - private final AtomicBoolean recovered = new AtomicBoolean(); + private final AtomicBoolean recoveryInProgress = new AtomicBoolean(); private final AtomicBoolean scheduledRecovery = new AtomicBoolean(); @Inject @@ -214,7 +214,7 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste @Override protected void doRun() { - if (recovered.compareAndSet(false, true)) { + if (recoveryInProgress.compareAndSet(false, true)) { logger.info("recover_after_time [{}] elapsed. performing state recovery...", recoverAfterTime); recoveryRunnable.run(); } @@ -222,7 +222,7 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste }, recoverAfterTime, ThreadPool.Names.GENERIC); } } else { - if (recovered.compareAndSet(false, true)) { + if (recoveryInProgress.compareAndSet(false, true)) { threadPool.generic().execute(new AbstractRunnable() { @Override public void onFailure(final Exception e) { @@ -240,7 +240,7 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste } private void resetRecoveredFlags() { - recovered.set(false); + recoveryInProgress.set(false); scheduledRecovery.set(false); } @@ -259,6 +259,9 @@ public class GatewayService extends AbstractLifecycleComponent implements Cluste @Override public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) { logger.info("recovered [{}] indices into cluster_state", newState.metaData().indices().size()); + // reset the flag even though state recovery completed, to ensure that if we subsequently become leader again based on a + // not-recovered state, we run state recovery again.
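To make the renamed flag's semantics concrete: recoveryInProgress now guards a single in-flight recovery attempt and is cleared again even after a successful recovery, so a node that later becomes leader on a not-recovered state can trigger recovery anew. A minimal sketch of the pattern (illustrative class and method names, not part of the patch):

    import java.util.concurrent.atomic.AtomicBoolean;

    class RecoveryGuardSketch {
        private final AtomicBoolean recoveryInProgress = new AtomicBoolean();

        void maybeRecover(Runnable recoveryRunnable) {
            if (recoveryInProgress.compareAndSet(false, true)) { // at most one attempt in flight
                recoveryRunnable.run();
            }
        }

        // invoked on failure and, after this change, also once the recovered state is applied
        void reset() {
            recoveryInProgress.set(false);
        }
    }
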
+ resetRecoveredFlags(); } @Override diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 62fe3622986..0094a8d722b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNode.Role; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Nullable; @@ -70,6 +71,8 @@ import org.elasticsearch.discovery.DiscoveryModule; import org.elasticsearch.discovery.SeedHostsProvider.HostsResolver; import org.elasticsearch.discovery.zen.PublishClusterStateStats; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.gateway.ClusterStateUpdaters; +import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.gateway.MockGatewayMetaState; import org.elasticsearch.indices.cluster.FakeThreadPoolMasterService; @@ -131,6 +134,7 @@ import static org.elasticsearch.cluster.coordination.NoMasterBlockService.NO_MAS import static org.elasticsearch.cluster.coordination.NoMasterBlockService.NO_MASTER_BLOCK_WRITES; import static org.elasticsearch.cluster.coordination.Reconfigurator.CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION; import static org.elasticsearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING; +import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.elasticsearch.node.Node.NODE_NAME_SETTING; import static org.elasticsearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; import static org.hamcrest.Matchers.containsString; @@ -191,6 +195,45 @@ public class CoordinatorTests extends ESTestCase { assertEquals(result1, result2); } + /** + * This test was added to verify that state recovery is properly reset on a node after it has become master and successfully + * recovered a state (see {@link GatewayService}). The situation that triggers this with a decent likelihood is as follows: + * 3 master-eligible nodes (leader, follower1, follower2); the followers are shut down (the leader remains); when the followers + * come back, one of them becomes leader and publishes its first state (with STATE_NOT_RECOVERED_BLOCK) to the old leader, + * which accepts it. The old leader initiates an election at the same time and wins it. It becomes leader again, but as it + * previously completed state recovery successfully, it is never reset to a state where state recovery can be retried.
+ */ + public void testStateRecoveryResetAfterPreviousLeadership() { + final Cluster cluster = new Cluster(3); + cluster.runRandomly(); + cluster.stabilise(); + + final ClusterNode leader = cluster.getAnyLeader(); + final ClusterNode follower1 = cluster.getAnyNodeExcept(leader); + final ClusterNode follower2 = cluster.getAnyNodeExcept(leader, follower1); + + // restart follower1 and follower2 + for (ClusterNode clusterNode : Arrays.asList(follower1, follower2)) { + clusterNode.close(); + cluster.clusterNodes.forEach( + cn -> cluster.deterministicTaskQueue.scheduleNow(cn.onNode( + new Runnable() { + @Override + public void run() { + cn.transportService.disconnectFromNode(clusterNode.getLocalNode()); + } + + @Override + public String toString() { + return "disconnect from " + clusterNode.getLocalNode() + " after shutdown"; + } + }))); + cluster.clusterNodes.replaceAll(cn -> cn == clusterNode ? cn.restartedNode() : cn); + } + + cluster.stabilise(); + } + public void testCanUpdateClusterStateAfterStabilisation() { final Cluster cluster = new Cluster(randomIntBetween(1, 5)); cluster.runRandomly(); @@ -1525,6 +1568,10 @@ public class CoordinatorTests extends ESTestCase { assertTrue(leaderId + " has been bootstrapped", leader.coordinator.isInitialConfigurationSet()); assertTrue(leaderId + " exists in its last-applied state", leader.getLastAppliedClusterState().getNodes().nodeExists(leaderId)); + assertThat(leaderId + " has no NO_MASTER_BLOCK", + leader.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID), equalTo(false)); + assertThat(leaderId + " has no STATE_NOT_RECOVERED_BLOCK", + leader.getLastAppliedClusterState().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK), equalTo(false)); assertThat(leaderId + " has applied its state ", leader.getLastAppliedClusterState().getVersion(), isEqualToLeaderVersion); for (final ClusterNode clusterNode : clusterNodes) { @@ -1556,6 +1603,8 @@ public class CoordinatorTests extends ESTestCase { equalTo(leader.getLocalNode())); assertThat(nodeId + " has no NO_MASTER_BLOCK", clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlockWithId(NO_MASTER_BLOCK_ID), equalTo(false)); + assertThat(nodeId + " has no STATE_NOT_RECOVERED_BLOCK", + clusterNode.getLastAppliedClusterState().blocks().hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK), equalTo(false)); } else { assertThat(nodeId + " is not following " + leaderId, clusterNode.coordinator.getMode(), is(CANDIDATE)); assertThat(nodeId + " has no master", clusterNode.getLastAppliedClusterState().nodes().getMasterNode(), nullValue()); @@ -1725,7 +1774,8 @@ public class CoordinatorTests extends ESTestCase { } else { nodeEnvironment = null; delegate = new InMemoryPersistedState(0L, - clusterState(0L, 0L, localNode, VotingConfiguration.EMPTY_CONFIG, VotingConfiguration.EMPTY_CONFIG, 0L)); + ClusterStateUpdaters.addStateNotRecoveredBlock( + clusterState(0L, 0L, localNode, VotingConfiguration.EMPTY_CONFIG, VotingConfiguration.EMPTY_CONFIG, 0L))); } } catch (IOException e) { throw new UncheckedIOException("Unable to create MockPersistedState", e); @@ -1765,8 +1815,9 @@ public class CoordinatorTests extends ESTestCase { clusterState.writeTo(outStream); StreamInput inStream = new NamedWriteableAwareStreamInput(outStream.bytes().streamInput(), new NamedWriteableRegistry(ClusterModule.getNamedWriteables())); + // adapt cluster state to new localNode instance and add blocks delegate = new InMemoryPersistedState(adaptCurrentTerm.apply(oldState.getCurrentTerm()), - ClusterState.readFrom(inStream, 
newLocalNode)); // adapts it to new localNode instance + ClusterStateUpdaters.addStateNotRecoveredBlock(ClusterState.readFrom(inStream, newLocalNode))); } } catch (IOException e) { throw new UncheckedIOException("Unable to create MockPersistedState", e); @@ -1870,15 +1921,19 @@ public class CoordinatorTests extends ESTestCase { transportService)); final Collection> onJoinValidators = Collections.singletonList((dn, cs) -> extraJoinValidators.forEach(validator -> validator.accept(dn, cs))); + final AllocationService allocationService = ESAllocationTestCase.createAllocationService(Settings.EMPTY); coordinator = new Coordinator("test_node", settings, clusterSettings, transportService, writableRegistry(), - ESAllocationTestCase.createAllocationService(Settings.EMPTY), masterService, this::getPersistedState, + allocationService, masterService, this::getPersistedState, Cluster.this::provideSeedHosts, clusterApplierService, onJoinValidators, Randomness.get()); masterService.setClusterStatePublisher(coordinator); + final GatewayService gatewayService = new GatewayService(settings, allocationService, clusterService, + deterministicTaskQueue.getThreadPool(this::onNode), null, null, coordinator); logger.trace("starting up [{}]", localNode); transportService.start(); transportService.acceptIncomingRequests(); coordinator.start(); + gatewayService.start(); clusterService.start(); coordinator.startInitialJoin(); } From 1e0b0f640b41ce5f4f497fdf90b9dbfda271201c Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 28 May 2019 13:56:13 +0200 Subject: [PATCH 28/40] Fix compilation Follow-up to 5598647922c --- .../elasticsearch/cluster/coordination/CoordinatorTests.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 0094a8d722b..d4afca3f567 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -1927,7 +1927,7 @@ public class CoordinatorTests extends ESTestCase { Cluster.this::provideSeedHosts, clusterApplierService, onJoinValidators, Randomness.get()); masterService.setClusterStatePublisher(coordinator); final GatewayService gatewayService = new GatewayService(settings, allocationService, clusterService, - deterministicTaskQueue.getThreadPool(this::onNode), null, null, coordinator); + deterministicTaskQueue.getThreadPool(this::onNode), null, coordinator); logger.trace("starting up [{}]", localNode); transportService.start(); From 3079d2d2951915d3465194ee2e3c525133db9dcd Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 28 May 2019 08:47:18 -0400 Subject: [PATCH 29/40] [DOCS] Escape cross-ref link comma for Asciidoctor (#42402) --- docs/reference/rollup/rollup-api.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/rollup/rollup-api.asciidoc b/docs/reference/rollup/rollup-api.asciidoc index 099686fb432..5981336d0a0 100644 --- a/docs/reference/rollup/rollup-api.asciidoc +++ b/docs/reference/rollup/rollup-api.asciidoc @@ -9,7 +9,7 @@ * <>, <>, * <>, <>, -* <> +* <> * <> [float] From b30ca8da28d8be127ad16c383587974b904b7bd7 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 28 May 2019 08:52:59 -0400 Subject: [PATCH 30/40] [DOCS] Fix API Quick Reference rollup attribute for Asciidoctor (#42403) --- docs/reference/rollup/api-quickref.asciidoc | 2 
+- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/rollup/api-quickref.asciidoc b/docs/reference/rollup/api-quickref.asciidoc index 21eefefb4b1..d1ea03b6284 100644 --- a/docs/reference/rollup/api-quickref.asciidoc +++ b/docs/reference/rollup/api-quickref.asciidoc @@ -5,7 +5,7 @@ experimental[] -Most {rollup} endpoints have the following base: +Most rollup endpoints have the following base: [source,js] ---- From 31d2bdca37b7e9fc90241e31c85b695203e38c15 Mon Sep 17 00:00:00 2001 From: James Rodewig Date: Tue, 28 May 2019 08:56:50 -0400 Subject: [PATCH 31/40] [DOCS] Fix Moving Avg Aggregation `deprecated` macro for Asciidoctor (#42405) --- .../aggregations/pipeline/movavg-aggregation.asciidoc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc index 7c80e4797ba..5d0a4b1fb6b 100644 --- a/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc +++ b/docs/reference/aggregations/pipeline/movavg-aggregation.asciidoc @@ -1,9 +1,14 @@ [[search-aggregations-pipeline-movavg-aggregation]] === Moving Average Aggregation +ifdef::asciidoctor[] +deprecated:[6.4.0, "The Moving Average aggregation has been deprecated in favor of the more general <>. The new Moving Function aggregation provides all the same functionality as the Moving Average aggregation, but also provides more flexibility."] +endif::[] +ifndef::asciidoctor[] deprecated[6.4.0, The Moving Average aggregation has been deprecated in favor of the more general <>. The new Moving Function aggregation provides all the same functionality as the Moving Average aggregation, but also provides more flexibility.] +endif::[] Given an ordered series of data, the Moving Average aggregation will slide a window across the data and emit the average value of that window. For example, given the data `[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]`, we can calculate a simple moving From c21745c8abb9fd95193cc9c47dc581cb9ef2f52e Mon Sep 17 00:00:00 2001 From: David Turner Date: Tue, 28 May 2019 15:23:55 +0100 Subject: [PATCH 32/40] Avoid loading retention leases while writing them (#42620) Resolves #41430. --- .../org/elasticsearch/index/seqno/ReplicationTracker.java | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index 219d2096f7f..6c01c721c34 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -346,7 +346,10 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L * @throws IOException if an I/O exception occurs reading the retention leases */ public RetentionLeases loadRetentionLeases(final Path path) throws IOException { - final RetentionLeases retentionLeases = RetentionLeases.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path); + final RetentionLeases retentionLeases; + synchronized (retentionLeasePersistenceLock) { + retentionLeases = RetentionLeases.FORMAT.loadLatestState(logger, NamedXContentRegistry.EMPTY, path); + } // TODO after backporting we expect this never to happen in 8.x, so adjust this to throw an exception instead. 
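An aside on the locking change above: loads and writes of the retention lease state now serialize on the same lock, so a load can no longer observe a generation that a concurrent persist is still writing (the real code guards RetentionLeases.FORMAT.loadLatestState and the persist path with retentionLeasePersistenceLock). A stand-alone sketch of the shape of the fix, with a map standing in for the on-disk state and all names illustrative:

    import java.util.HashMap;
    import java.util.Map;

    class RetentionLeaseStoreSketch {
        private final Object persistenceLock = new Object();
        private final Map<String, Long> diskState = new HashMap<>(); // stand-in for the state file

        Map<String, Long> load() {
            synchronized (persistenceLock) { // previously loads took no lock
                return new HashMap<>(diskState);
            }
        }

        void persist(Map<String, Long> leases) {
            synchronized (persistenceLock) { // writer holds the same lock
                diskState.clear();
                diskState.putAll(leases);
            }
        }
    }
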
assert Version.CURRENT.major <= 8 : "throw an exception instead of returning EMPTY on null"; From aea600fe7de17c1a718823e20f161cc081f899f0 Mon Sep 17 00:00:00 2001 From: David Kyle Date: Tue, 28 May 2019 09:15:03 +0100 Subject: [PATCH 33/40] [Ml Data Frame] Return bad_request on preview when config is invalid (#42447) --- ...nsportPreviewDataFrameTransformAction.java | 24 ++++++++----- .../test/data_frame/preview_transforms.yml | 35 +++++++++++++++++++ 2 files changed, 51 insertions(+), 8 deletions(-) diff --git a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java index f4b93cc6ac4..dde9edb37e5 100644 --- a/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java +++ b/x-pack/plugin/data-frame/src/main/java/org/elasticsearch/xpack/dataframe/action/TransportPreviewDataFrameTransformAction.java @@ -31,6 +31,7 @@ import org.elasticsearch.xpack.core.dataframe.action.PreviewDataFrameTransformAc import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameIndexerTransformStats; import org.elasticsearch.xpack.core.dataframe.transforms.DataFrameTransformConfig; import org.elasticsearch.xpack.core.dataframe.transforms.SourceConfig; +import org.elasticsearch.xpack.dataframe.transforms.pivot.AggregationResultUtils; import org.elasticsearch.xpack.dataframe.transforms.pivot.Pivot; import java.util.List; @@ -102,14 +103,21 @@ public class TransportPreviewDataFrameTransformAction extends pivot.buildSearchRequest(source, null, NUMBER_OF_PREVIEW_BUCKETS), ActionListener.wrap( r -> { - final CompositeAggregation agg = r.getAggregations().get(COMPOSITE_AGGREGATION_NAME); - DataFrameIndexerTransformStats stats = DataFrameIndexerTransformStats.withDefaultTransformId(); - // remove all internal fields - List> results = pivot.extractResults(agg, deducedMappings, stats) - .peek(record -> { - record.keySet().removeIf(k -> k.startsWith("_")); - }).collect(Collectors.toList()); - listener.onResponse(results); + + try { + final CompositeAggregation agg = r.getAggregations().get(COMPOSITE_AGGREGATION_NAME); + DataFrameIndexerTransformStats stats = DataFrameIndexerTransformStats.withDefaultTransformId(); + // remove all internal fields + List> results = pivot.extractResults(agg, deducedMappings, stats) + .peek(record -> { + record.keySet().removeIf(k -> k.startsWith("_")); + }).collect(Collectors.toList()); + + listener.onResponse(results); + } catch (AggregationResultUtils.AggregationExtractionException extractionException) { + listener.onFailure( + new ElasticsearchStatusException(extractionException.getMessage(), RestStatus.BAD_REQUEST)); + } }, listener::onFailure )); diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml index 5e58048b3bf..7b5c4e8cb56 100644 --- a/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml +++ b/x-pack/plugin/src/test/resources/rest-api-spec/test/data_frame/preview_transforms.yml @@ -127,3 +127,38 @@ setup: "aggs": {"avg_response": {"avg": {"field": "responsetime"}}} } } + +--- +"Test preview returns bad request with invalid agg": + - do: + catch: bad_request + data_frame.preview_data_frame_transform: + body: > + { + "source": { "index": "airline-data" 
}, + "pivot": { + "group_by": { + "time": {"date_histogram": {"fixed_interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, + "aggs": { + "avg_response": {"avg": {"field": "responsetime"}}, + "time.min": {"min": {"field": "time"}} + } + } + } + + - do: + catch: /mixed object types of nested and non-nested fields \[time.min\]/ + data_frame.preview_data_frame_transform: + body: > + { + "source": { "index": "airline-data" }, + "pivot": { + "group_by": { + "time": {"date_histogram": {"fixed_interval": "1h", "field": "time", "format": "yyyy-MM-DD HH"}}}, + "aggs": { + "avg_response": {"avg": {"field": "responsetime"}}, + "time.min": {"min": {"field": "time"}} + } + } + } + From 130c832e10738308fed8a1c35321ef722dd521e4 Mon Sep 17 00:00:00 2001 From: Vigya Sharma Date: Tue, 28 May 2019 20:12:46 +0530 Subject: [PATCH 34/40] Validate routing commands using updated routing state (#42066) When multiple commands are called in sequence, fetch shards from mutable, up-to-date routing nodes to ensure each command's changes are visible to subsequent commands. This addresses an issue uncovered during work on #41050. --- ...AllocateEmptyPrimaryAllocationCommand.java | 13 +++- .../AllocateReplicaAllocationCommand.java | 26 +++++-- ...AllocateStalePrimaryAllocationCommand.java | 13 +++- .../allocation/AllocationCommandsTests.java | 72 +++++++++++++++++++ 4 files changed, 112 insertions(+), 12 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java index 4d037570dd2..2e3219e67c7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateEmptyPrimaryAllocationCommand.java @@ -110,13 +110,20 @@ public class AllocateEmptyPrimaryAllocationCommand extends BasePrimaryAllocation return explainOrThrowMissingRoutingNode(allocation, explain, discoNode); } - final ShardRouting shardRouting; try { - shardRouting = allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); + allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); } catch (IndexNotFoundException | ShardNotFoundException e) { return explainOrThrowRejectedCommand(explain, allocation, e); } - if (shardRouting.unassigned() == false) { + + ShardRouting shardRouting = null; + for (ShardRouting shard : allocation.routingNodes().unassigned()) { + if (shard.getIndexName().equals(index) && shard.getId() == shardId && shard.primary()) { + shardRouting = shard; + break; + } + } + if (shardRouting == null) { return explainOrThrowRejectedCommand(explain, allocation, "primary [" + index + "][" + shardId + "] is already assigned"); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java index 709681f2b20..5e1bcd81bb5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateReplicaAllocationCommand.java @@ -23,7 +23,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNode; import 
org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; -import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.allocation.RerouteExplanation; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; @@ -35,6 +34,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.shard.ShardNotFoundException; import java.io.IOException; +import java.util.ArrayList; import java.util.List; /** @@ -101,20 +101,34 @@ public class AllocateReplicaAllocationCommand extends AbstractAllocateAllocation return explainOrThrowMissingRoutingNode(allocation, explain, discoNode); } - final ShardRouting primaryShardRouting; try { - primaryShardRouting = allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); + allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); } catch (IndexNotFoundException | ShardNotFoundException e) { return explainOrThrowRejectedCommand(explain, allocation, e); } - if (primaryShardRouting.unassigned()) { + + ShardRouting primaryShardRouting = null; + for (RoutingNode node : allocation.routingNodes()) { + for (ShardRouting shard : node) { + if (shard.getIndexName().equals(index) && shard.getId() == shardId && shard.primary()) { + primaryShardRouting = shard; + break; + } + } + } + if (primaryShardRouting == null) { return explainOrThrowRejectedCommand(explain, allocation, "trying to allocate a replica shard [" + index + "][" + shardId + "], while corresponding primary shard is still unassigned"); } - List<ShardRouting> replicaShardRoutings = - allocation.routingTable().shardRoutingTable(index, shardId).replicaShardsWithState(ShardRoutingState.UNASSIGNED); + List<ShardRouting> replicaShardRoutings = new ArrayList<>(); + for (ShardRouting shard : allocation.routingNodes().unassigned()) { + if (shard.getIndexName().equals(index) && shard.getId() == shardId && shard.primary() == false) { + replicaShardRoutings.add(shard); + } + } + ShardRouting shardRouting; if (replicaShardRoutings.isEmpty()) { return explainOrThrowRejectedCommand(explain, allocation, diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java index f4c9aba17d7..7e645c2cfcb 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocateStalePrimaryAllocationCommand.java @@ -108,13 +108,20 @@ public class AllocateStalePrimaryAllocationCommand extends BasePrimaryAllocation return explainOrThrowMissingRoutingNode(allocation, explain, discoNode); } - final ShardRouting shardRouting; try { - shardRouting = allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); + allocation.routingTable().shardRoutingTable(index, shardId).primaryShard(); } catch (IndexNotFoundException | ShardNotFoundException e) { return explainOrThrowRejectedCommand(explain, allocation, e); } - if (shardRouting.unassigned() == false) { + + ShardRouting shardRouting = null; + for (ShardRouting shard : allocation.routingNodes().unassigned()) { + if (shard.getIndexName().equals(index) && shard.getId() == shardId && shard.primary()) { + shardRouting = shard; + break; + } + } + if (shardRouting
== null) { return explainOrThrowRejectedCommand(explain, allocation, "primary [" + index + "][" + shardId + "] is already assigned"); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java index c966e3cac27..1405be54fd5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationCommandsTests.java @@ -677,4 +677,76 @@ public class AllocationCommandsTests extends ESAllocationTestCase { assertEquals("[move_allocation] can't move [test][0] from " + node2 + " to " + node1 + ": source [" + node2.getName() + "] is not a data node.", e.getMessage()); } + + public void testConflictingCommandsInSingleRequest() { + AllocationService allocation = createAllocationService(Settings.builder() + .put(EnableAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none") + .put(EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none") + .build()); + + final String index1 = "test1"; + final String index2 = "test2"; + final String index3 = "test3"; + logger.info("--> building initial routing table"); + MetaData metaData = MetaData.builder() + .put(IndexMetaData.builder(index1).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1) + .putInSyncAllocationIds(0, Collections.singleton("randomAllocID")) + .putInSyncAllocationIds(1, Collections.singleton("randomAllocID2"))) + .put(IndexMetaData.builder(index2).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1) + .putInSyncAllocationIds(0, Collections.singleton("randomAllocID")) + .putInSyncAllocationIds(1, Collections.singleton("randomAllocID2"))) + .put(IndexMetaData.builder(index3).settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1) + .putInSyncAllocationIds(0, Collections.singleton("randomAllocID")) + .putInSyncAllocationIds(1, Collections.singleton("randomAllocID2"))) + .build(); + RoutingTable routingTable = RoutingTable.builder() + .addAsRecovery(metaData.index(index1)) + .addAsRecovery(metaData.index(index2)) + .addAsRecovery(metaData.index(index3)) + .build(); + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(routingTable).build(); + + final String node1 = "node1"; + final String node2 = "node2"; + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder() + .add(newNode(node1)) + .add(newNode(node2)) + ).build(); + final ClusterState finalClusterState = allocation.reroute(clusterState, "reroute"); + + logger.info("--> allocating same index primary in multiple commands should fail"); + assertThat(expectThrows(IllegalArgumentException.class, () -> { + allocation.reroute(finalClusterState, + new AllocationCommands( + new AllocateStalePrimaryAllocationCommand(index1, 0, node1, true), + new AllocateStalePrimaryAllocationCommand(index1, 0, node2, true) + ), false, false); + }).getMessage(), containsString("primary [" + index1 + "][0] is already assigned")); + + assertThat(expectThrows(IllegalArgumentException.class, () -> { + allocation.reroute(finalClusterState, + new AllocationCommands( + new AllocateEmptyPrimaryAllocationCommand(index2, 0, node1, true), + new AllocateEmptyPrimaryAllocationCommand(index2, 0, node2, true) + ), false, false); + }).getMessage(), 
containsString("primary [" + index2 + "][0] is already assigned")); + + + clusterState = allocation.reroute(clusterState, + new AllocationCommands(new AllocateEmptyPrimaryAllocationCommand(index3, 0, node1, true)), false, false).getClusterState(); + clusterState = allocation.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); + + final ClusterState updatedClusterState = clusterState; + assertThat(updatedClusterState.getRoutingNodes().node(node1).shardsWithState(STARTED).size(), equalTo(1)); + + logger.info("--> subsequent replica allocation fails as all configured replicas have been allocated"); + assertThat(expectThrows(IllegalArgumentException.class, () -> { + allocation.reroute(updatedClusterState, + new AllocationCommands( + new AllocateReplicaAllocationCommand(index3, 0, node2), + new AllocateReplicaAllocationCommand(index3, 0, node2) + ), false, false); + }).getMessage(), containsString("all copies of [" + index3 + "][0] are already assigned. Use the move allocation command instead")); + } } From d06618a70d66bfe4a5c0f99fcd19ad16e8eebe90 Mon Sep 17 00:00:00 2001 From: Benjamin Trent Date: Tue, 28 May 2019 11:36:30 -0400 Subject: [PATCH 35/40] [ML] adding delayed_data_check_config to datafeed update docs (#42095) (#42626) * [ML] adding delayed_data_check_config to datafeed update docs * [DOCS] Edits delayed data configuration details --- .../ml/apis/datafeedresource.asciidoc | 30 ++++++++++--------- docs/reference/ml/apis/put-datafeed.asciidoc | 9 +++--- .../ml/apis/update-datafeed.asciidoc | 9 +++++- 3 files changed, 29 insertions(+), 19 deletions(-) diff --git a/docs/reference/ml/apis/datafeedresource.asciidoc b/docs/reference/ml/apis/datafeedresource.asciidoc index 33fce3dbf7c..5c1e3e74a6a 100644 --- a/docs/reference/ml/apis/datafeedresource.asciidoc +++ b/docs/reference/ml/apis/datafeedresource.asciidoc @@ -61,12 +61,12 @@ A {dfeed} resource has the following properties: `delayed_data_check_config`:: (object) Specifies whether the data feed checks for missing data and - and the size of the window. For example: + the size of the window. For example: `{"enabled": true, "check_window": "1h"}` See <>. [[ml-datafeed-chunking-config]] -==== Chunking Configuration Objects +==== Chunking configuration objects {dfeeds-cap} might be required to search over long time periods, for several months or years. This search is split into time chunks in order to ensure the load @@ -88,31 +88,33 @@ A chunking configuration object has the following properties: For example: `3h`. [[ml-datafeed-delayed-data-check-config]] -==== Delayed Data Check Configuration Objects +==== Delayed data check configuration objects The {dfeed} can optionally search over indices that have already been read in -an effort to find if any data has since been added to the index. If missing data -is found, it is a good indication that the `query_delay` option is set too low and -the data is being indexed after the {dfeed} has passed that moment in time. See +an effort to determine whether any data has subsequently been added to the index. +If missing data is found, it is a good indication that the `query_delay` option +is set too low and the data is being indexed after the {dfeed} has passed that +moment in time. See {stack-ov}/ml-delayed-data-detection.html[Working with delayed data]. -This check only runs on real-time {dfeeds} +This check runs only on real-time {dfeeds}. 
The configuration object has the following properties: `enabled`:: - (boolean) Should the {dfeed} periodically check for data being indexed after reading. - Defaults to `true` + (boolean) Specifies whether the {dfeed} periodically checks for delayed data. + Defaults to `true`. `check_window`:: - (time units) The window of time before the latest finalized bucket that should be searched - for late data. Defaults to `null` which causes an appropriate `check_window` to be calculated - when the real-time {dfeed} runs. - The default `check_window` span calculation is the max between `2h` or `8 * bucket_span`. + (time units) The window of time that is searched for late data. This window of + time ends with the latest finalized bucket. It defaults to `null`, which + causes an appropriate `check_window` to be calculated when the real-time + {dfeed} runs. In particular, the default `check_window` span calculation is + based on the maximum of `2h` or `8 * bucket_span`. [float] [[ml-datafeed-counts]] -==== {dfeed-cap} Counts +==== {dfeed-cap} counts The get {dfeed} statistics API provides information about the operational progress of a {dfeed}. All of these properties are informational; you cannot diff --git a/docs/reference/ml/apis/put-datafeed.asciidoc b/docs/reference/ml/apis/put-datafeed.asciidoc index 52728dd093d..2e0f6700191 100644 --- a/docs/reference/ml/apis/put-datafeed.asciidoc +++ b/docs/reference/ml/apis/put-datafeed.asciidoc @@ -45,6 +45,11 @@ IMPORTANT: You must use {kib} or this API to create a {dfeed}. Do not put a {df (object) Specifies how data searches are split into time chunks. See <>. +`delayed_data_check_config`:: + (object) Specifies whether the data feed checks for missing data and + the size of the window. See + <>. + `frequency`:: (time units) The interval at which scheduled queries are made while the {dfeed} runs in real time. The default value is either the bucket span for short @@ -82,10 +87,6 @@ IMPORTANT: You must use {kib} or this API to create a {dfeed}. Do not put a {df (unsigned integer) The `size` parameter that is used in {es} searches. The default value is `1000`. -`delayed_data_check_config`:: - (object) Specifies if and with how large a window should the data feed check - for missing data. See <>. - For more information about these properties, see <>. diff --git a/docs/reference/ml/apis/update-datafeed.asciidoc b/docs/reference/ml/apis/update-datafeed.asciidoc index a370c1acef9..63878913c7f 100644 --- a/docs/reference/ml/apis/update-datafeed.asciidoc +++ b/docs/reference/ml/apis/update-datafeed.asciidoc @@ -14,7 +14,10 @@ Updates certain properties of a {dfeed}. `POST _ml/datafeeds//_update` -//===== Description +===== Description + +NOTE: If you update the `delayed_data_check_config` property, you must stop and +start the {dfeed} for the change to be applied. ==== Path Parameters @@ -32,6 +35,10 @@ The following properties can be updated after the {dfeed} is created: `chunking_config`:: (object) Specifies how data searches are split into time chunks. See <>. + +`delayed_data_check_config`:: + (object) Specifies whether the data feed checks for missing data and + the size of the window. See <>. 
`frequency`:: (time units) The interval at which scheduled queries are made while the From 6166fed6f1fd51de02497bc0840be7909ac2647f Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 28 May 2019 17:58:00 +0200 Subject: [PATCH 36/40] Fix BulkProcessorRetryIT (#41700) (#42618) * Now that we process the bulk requests themselves on the WRITE threadpool, they can run out of retries too like the item requests even when backoff is active * Fixes #41324 by using the same logic that checks failed item requests for their retry status for the top level bulk requests as well --- .../client/BulkProcessorRetryIT.java | 38 +++++++++++-------- .../action/bulk/BulkProcessorRetryIT.java | 38 +++++++++++-------- 2 files changed, 46 insertions(+), 30 deletions(-) diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorRetryIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorRetryIT.java index c18c4363897..77877c46f1a 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorRetryIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/BulkProcessorRetryIT.java @@ -82,6 +82,7 @@ public class BulkProcessorRetryIT extends ESRestHighLevelClientTestCase { @Override public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + internalPolicy.logResponse(failure); responses.add(failure); latch.countDown(); } @@ -105,16 +106,8 @@ public class BulkProcessorRetryIT extends ESRestHighLevelClientTestCase { BulkItemResponse.Failure failure = bulkItemResponse.getFailure(); if (failure.getStatus() == RestStatus.TOO_MANY_REQUESTS) { if (rejectedExecutionExpected == false) { - Iterator<TimeValue> backoffState = internalPolicy.backoffStateFor(bulkResponse); - assertNotNull("backoffState is null (indicates a bulk request got rejected without retry)", backoffState); - if (backoffState.hasNext()) { - // we're not expecting that we overwhelmed it even once when we maxed out the number of retries - throw new AssertionError("Got rejected although backoff policy would allow more retries", - failure.getCause()); - } else { - rejectedAfterAllRetries = true; - logger.debug("We maxed out the number of bulk retries and got rejected (this is ok)."); - } + assertRetriedCorrectly(internalPolicy, bulkResponse, failure.getCause()); + rejectedAfterAllRetries = true; } } else { throw new AssertionError("Unexpected failure with status: " + failure.getStatus()); @@ -123,8 +116,12 @@ public class BulkProcessorRetryIT extends ESRestHighLevelClientTestCase { } } else { if (response instanceof RemoteTransportException - && ((RemoteTransportException) response).status() == RestStatus.TOO_MANY_REQUESTS && rejectedExecutionExpected) { - // ignored, we exceeded the write queue size with dispatching the initial bulk request + && ((RemoteTransportException) response).status() == RestStatus.TOO_MANY_REQUESTS) { + if (rejectedExecutionExpected == false) { + assertRetriedCorrectly(internalPolicy, response, ((Throwable) response).getCause()); + rejectedAfterAllRetries = true; + } + // ignored, we exceeded the write queue size when dispatching the initial bulk request } else { Throwable t = (Throwable) response; // we're not expecting any other errors @@ -146,6 +143,17 @@ public class BulkProcessorRetryIT extends ESRestHighLevelClientTestCase { } + private void assertRetriedCorrectly(CorrelatingBackoffPolicy internalPolicy, Object bulkResponse, Throwable failure) { + Iterator<TimeValue> backoffState = internalPolicy.backoffStateFor(bulkResponse);
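+ // a null iterator means the backoff policy was never consulted for this response, i.e. the bulk request was rejected without a single retry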
+ assertNotNull("backoffState is null (indicates a bulk request got rejected without retry)", backoffState); + if (backoffState.hasNext()) { + // we're not expecting that we overwhelmed it even once when we maxed out the number of retries + throw new AssertionError("Got rejected although backoff policy would allow more retries", failure); + } else { + logger.debug("We maxed out the number of bulk retries and got rejected (this is ok)."); + } + } + private static MultiGetRequest indexDocs(BulkProcessor processor, int numDocs) { MultiGetRequest multiGetRequest = new MultiGetRequest(); for (int i = 1; i <= numDocs; i++) { @@ -164,7 +172,7 @@ public class BulkProcessorRetryIT extends ESRestHighLevelClientTestCase { * as the last call to the backoff policy's iterator. The advantage is that this is non-invasive to the rest of the production code. */ private static class CorrelatingBackoffPolicy extends BackoffPolicy { - private final Map<BulkResponse, Iterator<TimeValue>> correlations = new ConcurrentHashMap<>(); + private final Map<Object, Iterator<TimeValue>> correlations = new ConcurrentHashMap<>(); // this is intentionally *not* static final. We will only ever have one instance of this class per test case and want the // thread local to be eligible for garbage collection right after the test to avoid leaks. private final ThreadLocal<Iterator<TimeValue>> iterators = new ThreadLocal<>(); @@ -175,13 +183,13 @@ public class BulkProcessorRetryIT extends ESRestHighLevelClientTestCase { this.delegate = delegate; } - public Iterator<TimeValue> backoffStateFor(BulkResponse response) { + public Iterator<TimeValue> backoffStateFor(Object response) { return correlations.get(response); } // Assumption: This method is called from the same thread as the last call to the internal iterator's #hasNext() / #next() // see also Retry.AbstractRetryHandler#onResponse(). - public void logResponse(BulkResponse response) { + public void logResponse(Object response) { Iterator<TimeValue> iterator = iterators.get(); // did we ever retry?
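// Retry drives the backoff iterator and this callback on the same thread, so the thread-local handoff is sufficient to pair a response with its iterator state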
if (iterator != null) { diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java index e4b6fff9fc3..e7285ff6f97 100644 --- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java +++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorRetryIT.java @@ -93,6 +93,7 @@ public class BulkProcessorRetryIT extends ESIntegTestCase { @Override public void afterBulk(long executionId, BulkRequest request, Throwable failure) { + internalPolicy.logResponse(failure); responses.add(failure); latch.countDown(); } @@ -117,16 +118,8 @@ public class BulkProcessorRetryIT extends ESIntegTestCase { BulkItemResponse.Failure failure = bulkItemResponse.getFailure(); if (failure.getStatus() == RestStatus.TOO_MANY_REQUESTS) { if (rejectedExecutionExpected == false) { - Iterator<TimeValue> backoffState = internalPolicy.backoffStateFor(bulkResponse); - assertNotNull("backoffState is null (indicates a bulk request got rejected without retry)", backoffState); - if (backoffState.hasNext()) { - // we're not expecting that we overwhelmed it even once when we maxed out the number of retries - throw new AssertionError("Got rejected although backoff policy would allow more retries", - failure.getCause()); - } else { - rejectedAfterAllRetries = true; - logger.debug("We maxed out the number of bulk retries and got rejected (this is ok)."); - } + assertRetriedCorrectly(internalPolicy, bulkResponse, failure.getCause()); + rejectedAfterAllRetries = true; } } else { throw new AssertionError("Unexpected failure status: " + failure.getStatus()); @@ -135,8 +128,12 @@ public class BulkProcessorRetryIT extends ESIntegTestCase { } } else { if (response instanceof RemoteTransportException - && ((RemoteTransportException) response).status() == RestStatus.TOO_MANY_REQUESTS && rejectedExecutionExpected) { - // ignored, we exceeded the write queue size with dispatching the initial bulk request + && ((RemoteTransportException) response).status() == RestStatus.TOO_MANY_REQUESTS) { + if (rejectedExecutionExpected == false) { + assertRetriedCorrectly(internalPolicy, response, ((Throwable) response).getCause()); + rejectedAfterAllRetries = true; + } + // ignored, we exceeded the write queue size when dispatching the initial bulk request } else { Throwable t = (Throwable) response; // we're not expecting any other errors @@ -163,6 +160,17 @@ public class BulkProcessorRetryIT extends ESIntegTestCase { } } + private void assertRetriedCorrectly(CorrelatingBackoffPolicy internalPolicy, Object bulkResponse, Throwable failure) { + Iterator<TimeValue> backoffState = internalPolicy.backoffStateFor(bulkResponse); + assertNotNull("backoffState is null (indicates a bulk request got rejected without retry)", backoffState); + if (backoffState.hasNext()) { + // we're not expecting that we overwhelmed it even once when we maxed out the number of retries + throw new AssertionError("Got rejected although backoff policy would allow more retries", failure); + } else { + logger.debug("We maxed out the number of bulk retries and got rejected (this is ok)."); + } + } + private static void indexDocs(BulkProcessor processor, int numDocs) { for (int i = 1; i <= numDocs; i++) { processor.add(client() @@ -183,7 +191,7 @@ public class BulkProcessorRetryIT extends ESIntegTestCase { * as the last call to the backoff policy's iterator. The advantage is that this is non-invasive to the rest of the production code.
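* Keying the correlation map by {@code Object} lets the test look up both item-level bulk responses and the top-level rejection passed to {@code afterBulk}.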
*/ private static class CorrelatingBackoffPolicy extends BackoffPolicy { - private final Map<BulkResponse, Iterator<TimeValue>> correlations = new ConcurrentHashMap<>(); + private final Map<Object, Iterator<TimeValue>> correlations = new ConcurrentHashMap<>(); // this is intentionally *not* static final. We will only ever have one instance of this class per test case and want the // thread local to be eligible for garbage collection right after the test to avoid leaks. private final ThreadLocal<Iterator<TimeValue>> iterators = new ThreadLocal<>(); @@ -194,13 +202,13 @@ public class BulkProcessorRetryIT extends ESIntegTestCase { this.delegate = delegate; } - public Iterator<TimeValue> backoffStateFor(BulkResponse response) { + public Iterator<TimeValue> backoffStateFor(Object response) { return correlations.get(response); } // Assumption: This method is called from the same thread as the last call to the internal iterator's #hasNext() / #next() // see also Retry.AbstractRetryHandler#onResponse(). - public void logResponse(BulkResponse response) { + public void logResponse(Object response) { Iterator<TimeValue> iterator = iterators.get(); // did we ever retry? if (iterator != null) { From 0e92ef1843a527f740598be7fa34fd6027e02408 Mon Sep 17 00:00:00 2001 From: Armin Braun Date: Tue, 28 May 2019 17:58:23 +0200 Subject: [PATCH 37/40] Fix Incorrect Time Math in MockTransport (#42595) (#42617) * Fix Incorrect Time Math in MockTransport * The timeunit here must be nanos for the current time (we even convert it accordingly in the logging) * Also, changed the log message when dumping stack traces a little to make it easier to grep for (otherwise it's the same as the message on unregister) --- .../org/elasticsearch/transport/nio/MockNioTransport.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java index a261d68cbb3..bd260da169c 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/nio/MockNioTransport.java @@ -371,11 +371,11 @@ public class MockNioTransport extends TcpTransport { private void logLongRunningExecutions() { for (Map.Entry<Thread, Long> entry : registry.entrySet()) { - final long elapsedTime = threadPool.relativeTimeInMillis() - entry.getValue(); - if (elapsedTime > WARN_THRESHOLD) { + final long elapsedTimeInNanos = threadPool.relativeTimeInNanos() - entry.getValue(); + if (elapsedTimeInNanos > WARN_THRESHOLD) { final Thread thread = entry.getKey(); - logger.warn("Slow execution on network thread [{}] [{} milliseconds]: \n{}", thread.getName(), - TimeUnit.NANOSECONDS.toMillis(elapsedTime), + logger.warn("Potentially blocked execution on network thread [{}] [{} milliseconds]: \n{}", thread.getName(), + TimeUnit.NANOSECONDS.toMillis(elapsedTimeInNanos), Arrays.stream(thread.getStackTrace()).map(Object::toString).collect(Collectors.joining("\n"))); } } From 8ff37e99f5590cabaf9fb33f5128b5a816b8d440 Mon Sep 17 00:00:00 2001 From: lcawl Date: Tue, 28 May 2019 08:52:45 -0700 Subject: [PATCH 38/40] [DOCS] Removes coming tags --- docs/reference/release-notes/7.0.0-rc1.asciidoc | 2 -- docs/reference/release-notes/7.1.asciidoc | 2 -- docs/reference/release-notes/highlights-7.0.0.asciidoc | 2 -- 3 files changed, 6 deletions(-) diff --git a/docs/reference/release-notes/7.0.0-rc1.asciidoc b/docs/reference/release-notes/7.0.0-rc1.asciidoc index 5e303aac6b8..8d569529046 100644 --- a/docs/reference/release-notes/7.0.0-rc1.asciidoc +++
b/docs/reference/release-notes/7.0.0-rc1.asciidoc @@ -1,8 +1,6 @@ [[release-notes-7.0.0-rc1]] == {es} version 7.0.0-rc1 -coming[7.0.0-rc1] - Also see <>. [[breaking-7.0.0-rc1]] diff --git a/docs/reference/release-notes/7.1.asciidoc b/docs/reference/release-notes/7.1.asciidoc index 8fa7509e40e..ec93927513b 100644 --- a/docs/reference/release-notes/7.1.asciidoc +++ b/docs/reference/release-notes/7.1.asciidoc @@ -1,8 +1,6 @@ [[release-notes-7.1.1]] == {es} version 7.1.1 -coming[7.1.1] - Also see <>. [[bug-7.1.1]] diff --git a/docs/reference/release-notes/highlights-7.0.0.asciidoc b/docs/reference/release-notes/highlights-7.0.0.asciidoc index c48bf85c655..b5aa2e76ff8 100644 --- a/docs/reference/release-notes/highlights-7.0.0.asciidoc +++ b/docs/reference/release-notes/highlights-7.0.0.asciidoc @@ -4,8 +4,6 @@ 7.0.0 ++++ -coming[7.0.0] - //NOTE: The notable-highlights tagged regions are re-used in the //Installation and Upgrade Guide From 77fc7b210767e2797c90df31f2bc169082108596 Mon Sep 17 00:00:00 2001 From: Lisa Cawley Date: Tue, 28 May 2019 09:04:02 -0700 Subject: [PATCH 39/40] [DOCS] Reorg monitoring configuration for re-use (#42547) --- .../configuring-metricbeat.asciidoc | 121 ++++++++++-------- 1 file changed, 68 insertions(+), 53 deletions(-) diff --git a/docs/reference/monitoring/configuring-metricbeat.asciidoc b/docs/reference/monitoring/configuring-metricbeat.asciidoc index df578e88da6..e337c5bf7d3 100644 --- a/docs/reference/monitoring/configuring-metricbeat.asciidoc +++ b/docs/reference/monitoring/configuring-metricbeat.asciidoc @@ -17,6 +17,8 @@ image::monitoring/images/metricbeat.png[Example monitoring architecture] To learn about monitoring in general, see {stack-ov}/xpack-monitoring.html[Monitoring the {stack}]. +//NOTE: The tagged regions are re-used in the Stack Overview. + . Enable the collection of monitoring data. Set `xpack.monitoring.collection.enabled` to `true` on each node in the production cluster. By default, it is disabled (`false`). @@ -71,13 +73,13 @@ PUT _cluster/settings Leave `xpack.monitoring.enabled` set to its default value (`true`). -- -. On each {es} node in the production cluster: +. {metricbeat-ref}/metricbeat-installation.html[Install {metricbeat}] on each +{es} node in the production cluster. -.. {metricbeat-ref}/metricbeat-installation.html[Install {metricbeat}]. - -.. Enable the {es} module in {metricbeat}. + +. Enable the {es} {xpack} module in {metricbeat} on each {es} node. + + -- +// tag::enable-es-module[] For example, to enable the default configuration in the `modules.d` directory, run the following command: [source,sh] ---------------------------------- metricbeat modules enable elasticsearch-xpack ---------------------------------- For more information, see {metricbeat-ref}/configuration-metricbeat.html[Specify which modules to run] and {metricbeat-ref}/metricbeat-module-elasticsearch.html[{es} module]. + +// end::enable-es-module[] -- -.. By default the module will collect {es} monitoring metrics from `http://localhost:9200`. -If the local {es} node has a different address, you must specify it via the `hosts` setting -in the `modules.d/elasticsearch-xpack.yml` file. - -.. If Elastic {security-features} are enabled, you must also provide a user ID -and password so that {metricbeat} can collect metrics successfully. - -... Create a user on the production cluster that has the -{stack-ov}/built-in-roles.html[`remote_monitoring_collector` built-in role]. -Alternatively, use the {stack-ov}/built-in-users.html[`remote_monitoring_user` built-in user]. - -...
Add the `username` and `password` settings to the {es} module configuration -file. +. Configure the {es} {xpack} module in {metricbeat} on each {es} node. + + -- -For example, add the following settings in the `modules.d/elasticsearch-xpack.yml` file: +// tag::configure-es-module[] +The `modules.d/elasticsearch-xpack.yml` file contains the following settings: [source,yaml] ---------------------------------- -- module: elasticsearch - ... - username: remote_monitoring_user - password: YOUR_PASSWORD + - module: elasticsearch + metricsets: + - ccr + - cluster_stats + - index + - index_recovery + - index_summary + - ml_job + - node_stats + - shard + period: 10s + hosts: ["http://localhost:9200"] + #username: "user" + #password: "secret" + xpack.enabled: true ---------------------------------- + +By default, the module collects {es} monitoring metrics from +`http://localhost:9200`. If that host and port number are not correct, you must +update the `hosts` setting. If you configured {es} to use encrypted +communications, you must access it via HTTPS. For example, use a `hosts` setting +like `https://localhost:9200`. +// end::configure-es-module[] + +// tag::remote-monitoring-user[] +If Elastic {security-features} are enabled, you must also provide a user ID +and password so that {metricbeat} can collect metrics successfully: + +.. Create a user on the production cluster that has the +{stack-ov}/built-in-roles.html[`remote_monitoring_collector` built-in role]. +Alternatively, use the +{stack-ov}/built-in-users.html[`remote_monitoring_user` built-in user]. + +.. Add the `username` and `password` settings to the {es} module configuration +file. +// end::remote-monitoring-user[] -- -.. If you configured {es} to use <>, -you must access it via HTTPS. For example, use a `hosts` setting like -`https://localhost:9200` in the `modules.d/elasticsearch-xpack.yml` file. - -.. Identify where to send the monitoring data. + +. Identify where to send the monitoring data. + + -- TIP: In production environments, we strongly recommend using a separate cluster @@ -136,48 +156,43 @@ configuration file (`metricbeat.yml`): [source,yaml] ---------------------------------- output.elasticsearch: + # Array of hosts to connect to. hosts: ["http://es-mon-1:9200", "http://es-mon2:9200"] <1> + + # Optional protocol and basic auth credentials. + #protocol: "https" + #username: "elastic" + #password: "changeme" ---------------------------------- <1> In this example, the data is stored on a monitoring cluster with nodes `es-mon-1` and `es-mon-2`. +If you configured the monitoring cluster to use encrypted communications, you +must access it via HTTPS. For example, use a `hosts` setting like +`https://es-mon-1:9200`. + IMPORTANT: The {es} {monitor-features} use ingest pipelines, therefore the cluster that stores the monitoring data must have at least one <>. -For more information about these configuration options, see -{metricbeat-ref}/elasticsearch-output.html[Configure the {es} output]. --- +If {es} {security-features} are enabled on the monitoring cluster, you must +provide a valid user ID and password so that {metricbeat} can send metrics +successfully: -.. If {es} {security-features} are enabled on the monitoring cluster, you -must provide a valid user ID and password so that {metricbeat} can send metrics -successfully. - -... Create a user on the monitoring cluster that has the +.. Create a user on the monitoring cluster that has the {stack-ov}/built-in-roles.html[`remote_monitoring_agent` built-in role]. 
Alternatively, use the {stack-ov}/built-in-users.html [`remote_monitoring_user` built-in user]. -... Add the `username` and `password` settings to the {es} output information in -the {metricbeat} configuration file (`metricbeat.yml`): + -- [source,yaml] ---------------------------------- output.elasticsearch: - ... - username: remote_monitoring_user - password: YOUR_PASSWORD ---------------------------------- -- .. Add the `username` and `password` settings to the {es} output information in the {metricbeat} configuration file. + For more information about these configuration options, see {metricbeat-ref}/elasticsearch-output.html[Configure the {es} output]. -- -.. If you configured the monitoring cluster to use -<>, you must access it via -HTTPS. For example, use a `hosts` setting like `https://es-mon-1:9200` in the -`metricbeat.yml` file. +. <> on each node. -. <>. -. {metricbeat-ref}/metricbeat-starting.html[Start {metricbeat}]. +. {metricbeat-ref}/metricbeat-starting.html[Start {metricbeat}] on each node. . {kibana-ref}/monitoring-data.html[View the monitoring data in {kib}]. From 6362ac14ad5159404c0d872c66a870b1a75ae9f7 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 28 May 2019 13:04:19 -0400 Subject: [PATCH 40/40] Lazily compute Java 8 home in reindex configuration (#42630) In the reindex from old tests we require Java 8. Today when configuring the reindex from old tests, we eagerly evaluate Java 8 home, which means that we require JAVA8_HOME to be set even if the reindex from old test tasks are not in the task graph. This is an onerous requirement if, for example, all that you want to do is build a distribution. This commit addresses this by making evaluation of Java 8 home lazy, so that it is only done and required if the reindex from old test tasks would be executed. --- modules/reindex/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/reindex/build.gradle b/modules/reindex/build.gradle index da184deedaa..260c8dcc1df 100644 --- a/modules/reindex/build.gradle +++ b/modules/reindex/build.gradle @@ -125,7 +125,7 @@ if (Os.isFamily(Os.FAMILY_WINDOWS)) { dependsOn unzip executable = new File(project.runtimeJavaHome, 'bin/java') env 'CLASSPATH', "${ -> project.configurations.oldesFixture.asPath }" - env 'JAVA_HOME', getJavaHome(it, 8) + env 'JAVA_HOME', "${ -> getJavaHome(it, 8)}" args 'oldes.OldElasticsearch', baseDir, unzip.temporaryDir,