From f2892f1bed9aedc7e62b05a01f7679e116f6b634 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 5 Jun 2018 12:42:13 +0200 Subject: [PATCH 1/8] Add a doc value format to binary fields. (#30860) This will be necessary for the `docvalue_fields` option to work correctly once we use the field's doc-value format to format doc-value fields. Binary values are formatted as base64-encoded strings. --- .../index/mapper/BinaryFieldMapper.java | 6 +++ .../elasticsearch/search/DocValueFormat.java | 45 +++++++++++++++++++ .../elasticsearch/search/SearchModule.java | 1 + .../search/DocValueFormatTests.java | 14 ++++++ .../search/fields/SearchFieldsIT.java | 14 ++++-- 5 files changed, 77 insertions(+), 3 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java index 1838b60050e..e19bdb67083 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/BinaryFieldMapper.java @@ -40,6 +40,8 @@ import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.plain.BytesBinaryDVIndexFieldData; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.QueryShardException; +import org.elasticsearch.search.DocValueFormat; +import org.joda.time.DateTimeZone; import java.io.IOException; import java.util.Base64; @@ -104,6 +106,10 @@ public class BinaryFieldMapper extends FieldMapper { return CONTENT_TYPE; } + @Override + public DocValueFormat docValueFormat(String format, DateTimeZone timeZone) { + return DocValueFormat.BINARY; + } @Override public BytesReference valueForDisplay(Object value) { diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index 8677370fc99..242e0887473 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -39,6 +39,7 @@ import java.text.DecimalFormatSymbols; import java.text.NumberFormat; import java.text.ParseException; import java.util.Arrays; +import java.util.Base64; import java.util.Locale; import java.util.Objects; import java.util.function.LongSupplier; @@ -121,6 +122,50 @@ public interface DocValueFormat extends NamedWriteable { } }; + DocValueFormat BINARY = new DocValueFormat() { + + @Override + public String getWriteableName() { + return "binary"; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + } + + @Override + public Object format(long value) { + throw new UnsupportedOperationException(); + } + + @Override + public Object format(double value) { + throw new UnsupportedOperationException(); + } + + @Override + public String format(BytesRef value) { + return Base64.getEncoder() + .withoutPadding() + .encodeToString(Arrays.copyOfRange(value.bytes, value.offset, value.offset + value.length)); + } + + @Override + public long parseLong(String value, boolean roundUp, LongSupplier now) { + throw new UnsupportedOperationException(); + } + + @Override + public double parseDouble(String value, boolean roundUp, LongSupplier now) { + throw new UnsupportedOperationException(); + } + + @Override + public BytesRef parseBytesRef(String value) { + return new BytesRef(Base64.getDecoder().decode(value)); + } + }; + final class DateTime implements DocValueFormat { public static final String NAME = 
"date_time"; diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java b/server/src/main/java/org/elasticsearch/search/SearchModule.java index 66ea407f42a..869dfe995ed 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -645,6 +645,7 @@ public class SearchModule { registerValueFormat(DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH); registerValueFormat(DocValueFormat.IP.getWriteableName(), in -> DocValueFormat.IP); registerValueFormat(DocValueFormat.RAW.getWriteableName(), in -> DocValueFormat.RAW); + registerValueFormat(DocValueFormat.BINARY.getWriteableName(), in -> DocValueFormat.BINARY); } /** diff --git a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java index e5cfbf98b3d..01906279474 100644 --- a/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java +++ b/server/src/test/java/org/elasticsearch/search/DocValueFormatTests.java @@ -44,6 +44,7 @@ public class DocValueFormatTests extends ESTestCase { entries.add(new Entry(DocValueFormat.class, DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH)); entries.add(new Entry(DocValueFormat.class, DocValueFormat.IP.getWriteableName(), in -> DocValueFormat.IP)); entries.add(new Entry(DocValueFormat.class, DocValueFormat.RAW.getWriteableName(), in -> DocValueFormat.RAW)); + entries.add(new Entry(DocValueFormat.class, DocValueFormat.BINARY.getWriteableName(), in -> DocValueFormat.BINARY)); NamedWriteableRegistry registry = new NamedWriteableRegistry(entries); BytesStreamOutput out = new BytesStreamOutput(); @@ -82,6 +83,11 @@ public class DocValueFormatTests extends ESTestCase { out.writeNamedWriteable(DocValueFormat.RAW); in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry); assertSame(DocValueFormat.RAW, in.readNamedWriteable(DocValueFormat.class)); + + out = new BytesStreamOutput(); + out.writeNamedWriteable(DocValueFormat.BINARY); + in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry); + assertSame(DocValueFormat.BINARY, in.readNamedWriteable(DocValueFormat.class)); } public void testRawFormat() { @@ -96,6 +102,14 @@ public class DocValueFormatTests extends ESTestCase { assertEquals("abc", DocValueFormat.RAW.format(new BytesRef("abc"))); } + public void testBinaryFormat() { + assertEquals("", DocValueFormat.BINARY.format(new BytesRef())); + assertEquals("KmQ", DocValueFormat.BINARY.format(new BytesRef(new byte[] {42, 100}))); + + assertEquals(new BytesRef(), DocValueFormat.BINARY.parseBytesRef("")); + assertEquals(new BytesRef(new byte[] {42, 100}), DocValueFormat.BINARY.parseBytesRef("KmQ")); + } + public void testBooleanFormat() { assertEquals(false, DocValueFormat.BOOLEAN.format(0)); assertEquals(true, DocValueFormat.BOOLEAN.format(1)); diff --git a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java index a8a2669ef9b..ab5387b6e3f 100644 --- a/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java +++ b/server/src/test/java/org/elasticsearch/search/fields/SearchFieldsIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.search.fields; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequestBuilder; import 
org.elasticsearch.action.search.SearchResponse; @@ -700,7 +701,7 @@ public class SearchFieldsIT extends ESIntegTestCase { assertThat(fields.get("test_field").getValue(), equalTo("foobar")); } - public void testFieldsPulledFromFieldData() throws Exception { + public void testDocValueFields() throws Exception { createIndex("test"); String mapping = Strings @@ -744,6 +745,7 @@ public class SearchFieldsIT extends ESIntegTestCase { .endObject() .startObject("binary_field") .field("type", "binary") + .field("doc_values", true) // off by default on binary fields .endObject() .startObject("ip_field") .field("type", "ip") @@ -766,6 +768,7 @@ public class SearchFieldsIT extends ESIntegTestCase { .field("double_field", 6.0d) .field("date_field", Joda.forPattern("dateOptionalTime").printer().print(date)) .field("boolean_field", true) + .field("binary_field", new byte[] {42, 100}) .field("ip_field", "::1") .endObject()).execute().actionGet(); @@ -782,6 +785,7 @@ public class SearchFieldsIT extends ESIntegTestCase { .addDocValueField("double_field") .addDocValueField("date_field") .addDocValueField("boolean_field") + .addDocValueField("binary_field") .addDocValueField("ip_field"); SearchResponse searchResponse = builder.execute().actionGet(); @@ -790,7 +794,7 @@ public class SearchFieldsIT extends ESIntegTestCase { Set fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); assertThat(fields, equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field", "float_field", "double_field", "date_field", "boolean_field", "text_field", "keyword_field", - "ip_field"))); + "binary_field", "ip_field"))); assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue().toString(), equalTo("1")); assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue().toString(), equalTo("2")); @@ -802,6 +806,8 @@ public class SearchFieldsIT extends ESIntegTestCase { assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true)); assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), + equalTo(new BytesRef(new byte[] {42, 100}))); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); builder = client().prepareSearch().setQuery(matchAllQuery()) @@ -815,6 +821,7 @@ public class SearchFieldsIT extends ESIntegTestCase { .addDocValueField("double_field", "use_field_mapping") .addDocValueField("date_field", "use_field_mapping") .addDocValueField("boolean_field", "use_field_mapping") + .addDocValueField("binary_field", "use_field_mapping") .addDocValueField("ip_field", "use_field_mapping"); searchResponse = builder.execute().actionGet(); @@ -823,7 +830,7 @@ public class SearchFieldsIT extends ESIntegTestCase { fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet()); assertThat(fields, equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field", "float_field", "double_field", "date_field", "boolean_field", "text_field", "keyword_field", - "ip_field"))); + "binary_field", "ip_field"))); assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue().toString(), equalTo("1")); 
assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue().toString(), equalTo("2")); @@ -836,6 +843,7 @@ public class SearchFieldsIT extends ESIntegTestCase { assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true)); assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo")); assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo")); + assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ")); assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1")); builder = client().prepareSearch().setQuery(matchAllQuery()) From 9531b7bbcbbd3391d9f605c2c4aa42a123aaf3bd Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Tue, 5 Jun 2018 13:00:43 +0200 Subject: [PATCH 2/8] Add BlobContainer.writeBlobAtomic() (#30902) This commit adds a new writeBlobAtomic() method to the BlobContainer interface that can be implemented by repository implementations which support atomic writes operations. When the BlobContainer implementation does not provide a specific implementation of writeBlobAtomic(), then the writeBlob() method is used. Related to #30680 --- .../resources/checkstyle_suppressions.xml | 2 - .../common/blobstore/BlobContainer.java | 23 ++++++ .../common/blobstore/fs/FsBlobContainer.java | 48 +++++++++++- .../blobstore/BlobStoreRepository.java | 19 +---- .../blobstore/ChecksumBlobStoreFormat.java | 71 +++++------------- .../blobstore/fs/FsBlobContainerTests.java | 40 ++++++++++ .../{ => fs}/FsBlobStoreContainerTests.java | 16 ++-- .../blobstore/{ => fs}/FsBlobStoreTests.java | 17 +++-- .../AbstractSnapshotIntegTestCase.java | 2 +- .../snapshots/BlobStoreFormatIT.java | 42 +---------- .../mockstore/BlobContainerWrapper.java | 10 +++ .../snapshots/mockstore/MockRepository.java | 75 +++++++++++++------ .../ESBlobStoreContainerTestCase.java | 6 +- 13 files changed, 228 insertions(+), 143 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobContainerTests.java rename server/src/test/java/org/elasticsearch/common/blobstore/{ => fs}/FsBlobStoreContainerTests.java (75%) rename server/src/test/java/org/elasticsearch/common/blobstore/{ => fs}/FsBlobStoreTests.java (84%) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index 609a7cf2ea6..c3956ca9d40 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -505,8 +505,6 @@ - - diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java index 6b9992e7e4c..7e3a385443f 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java @@ -74,6 +74,29 @@ public interface BlobContainer { */ void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException; + /** + * Reads blob content from the input stream and writes it to the container in a new blob with the given name, + * using an atomic write operation if the implementation supports it. 
When the BlobContainer implementation + * does not provide a specific implementation of writeBlobAtomic(String, InputStream, long), then + * the {@link #writeBlob(String, InputStream, long)} method is used. + * + * This method assumes the container does not already contain a blob of the same blobName. If a blob by the + * same name already exists, the operation will fail and an {@link IOException} will be thrown. + * + * @param blobName + * The name of the blob to write the contents of the input stream to. + * @param inputStream + * The input stream from which to retrieve the bytes to write to the blob. + * @param blobSize + * The size of the blob to be written, in bytes. It is implementation dependent whether + * this value is used in writing the blob to the repository. + * @throws FileAlreadyExistsException if a blob by the same name already exists + * @throws IOException if the input stream could not be read, or the target blob could not be written to. + */ + default void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException { + writeBlob(blobName, inputStream, blobSize); + } + /** * Deletes a blob with giving name, if the blob exists. If the blob does not exist, * this method throws a NoSuchFileException. diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index a9600681d16..6f1df0011b1 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -19,11 +19,12 @@ package org.elasticsearch.common.blobstore.fs; -import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.common.blobstore.support.PlainBlobMetaData; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.core.internal.io.Streams; import java.io.BufferedInputStream; @@ -56,8 +57,9 @@ import static java.util.Collections.unmodifiableMap; */ public class FsBlobContainer extends AbstractBlobContainer { - protected final FsBlobStore blobStore; + private static final String TEMP_FILE_PREFIX = "pending-"; + protected final FsBlobStore blobStore; protected final Path path; public FsBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path) { @@ -131,6 +133,48 @@ public class FsBlobContainer extends AbstractBlobContainer { IOUtils.fsync(path, true); } + @Override + public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException { + final String tempBlob = tempBlobName(blobName); + final Path tempBlobPath = path.resolve(tempBlob); + try { + try (OutputStream outputStream = Files.newOutputStream(tempBlobPath, StandardOpenOption.CREATE_NEW)) { + Streams.copy(inputStream, outputStream); + } + IOUtils.fsync(tempBlobPath, false); + + final Path blobPath = path.resolve(blobName); + // If the target file exists then Files.move() behaviour is implementation specific + // the existing file might be replaced or this method fails by throwing an IOException. 
+ if (Files.exists(blobPath)) { + throw new FileAlreadyExistsException("blob [" + blobPath + "] already exists, cannot overwrite"); + } + Files.move(tempBlobPath, blobPath, StandardCopyOption.ATOMIC_MOVE); + } catch (IOException ex) { + try { + deleteBlobIgnoringIfNotExists(tempBlob); + } catch (IOException e) { + ex.addSuppressed(e); + } + throw ex; + } finally { + IOUtils.fsync(path, true); + } + } + + public static String tempBlobName(final String blobName) { + return "pending-" + blobName + "-" + UUIDs.randomBase64UUID(); + } + + /** + * Returns true if the blob is a leftover temporary blob. + * + * The temporary blobs might be left after failed atomic write operation. + */ + public static boolean isTempBlobName(final String blobName) { + return blobName.startsWith(TEMP_FILE_PREFIX); + } + @Override public void move(String source, String target) throws IOException { Path sourcePath = path.resolve(source); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index cc5cfcccf3b..618dd3b8bc3 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -50,6 +50,7 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobMetaData; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.fs.FsBlobContainer; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; @@ -555,10 +556,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp String blobName = "master.dat"; BytesArray bytes = new BytesArray(testBytes); try (InputStream stream = bytes.streamInput()) { - testContainer.writeBlob(blobName + "-temp", stream, bytes.length()); + testContainer.writeBlobAtomic(blobName, stream, bytes.length()); } - // Make sure that move is supported - testContainer.move(blobName + "-temp", blobName); return seed; } } catch (IOException exp) { @@ -774,18 +773,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp } private void writeAtomic(final String blobName, final BytesReference bytesRef) throws IOException { - final String tempBlobName = "pending-" + blobName + "-" + UUIDs.randomBase64UUID(); try (InputStream stream = bytesRef.streamInput()) { - snapshotsBlobContainer.writeBlob(tempBlobName, stream, bytesRef.length()); - snapshotsBlobContainer.move(tempBlobName, blobName); - } catch (IOException ex) { - // temporary blob creation or move failed - try cleaning up - try { - snapshotsBlobContainer.deleteBlobIgnoringIfNotExists(tempBlobName); - } catch (IOException e) { - ex.addSuppressed(e); - } - throw ex; + snapshotsBlobContainer.writeBlobAtomic(blobName, stream, bytesRef.length()); } } @@ -955,7 +944,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp // Delete temporary index files first, as we might otherwise fail in the next step creating the new index file if an earlier // attempt to write an index file with this generation failed mid-way after creating the temporary file. 
for (final String blobName : blobs.keySet()) { - if (indexShardSnapshotsFormat.isTempBlobName(blobName)) { + if (FsBlobContainer.isTempBlobName(blobName)) { try { blobContainer.deleteBlobIgnoringIfNotExists(blobName); } catch (IOException e) { diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java index 8c8139d5abd..df9b41ba872 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/ChecksumBlobStoreFormat.java @@ -23,6 +23,7 @@ import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.apache.lucene.store.OutputStreamIndexOutput; +import org.elasticsearch.common.CheckedConsumer; import org.elasticsearch.common.CheckedFunction; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.bytes.BytesArray; @@ -52,8 +53,6 @@ import java.util.Locale; */ public class ChecksumBlobStoreFormat extends BlobStoreFormat { - private static final String TEMP_FILE_PREFIX = "pending-"; - private static final XContentType DEFAULT_X_CONTENT_TYPE = XContentType.SMILE; // The format version @@ -120,7 +119,7 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm } /** - * Writes blob in atomic manner with resolving the blob name using {@link #blobName} and {@link #tempBlobName} methods. + * Writes blob in atomic manner with resolving the blob name using {@link #blobName} method. *

* The blob will be compressed and checksum will be written if required. * @@ -131,20 +130,12 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm * @param name blob name */ public void writeAtomic(T obj, BlobContainer blobContainer, String name) throws IOException { - String blobName = blobName(name); - String tempBlobName = tempBlobName(name); - writeBlob(obj, blobContainer, tempBlobName); - try { - blobContainer.move(tempBlobName, blobName); - } catch (IOException ex) { - // Move failed - try cleaning up - try { - blobContainer.deleteBlob(tempBlobName); - } catch (Exception e) { - ex.addSuppressed(e); + final String blobName = blobName(name); + writeTo(obj, blobName, bytesArray -> { + try (InputStream stream = bytesArray.streamInput()) { + blobContainer.writeBlobAtomic(blobName, stream, bytesArray.length()); } - throw ex; - } + }); } /** @@ -157,51 +148,35 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm * @param name blob name */ public void write(T obj, BlobContainer blobContainer, String name) throws IOException { - String blobName = blobName(name); - writeBlob(obj, blobContainer, blobName); + final String blobName = blobName(name); + writeTo(obj, blobName, bytesArray -> { + try (InputStream stream = bytesArray.streamInput()) { + blobContainer.writeBlob(blobName, stream, bytesArray.length()); + } + }); } - /** - * Writes blob in atomic manner without resolving the blobName using using {@link #blobName} method. - *

- * The blob will be compressed and checksum will be written if required. - * - * @param obj object to be serialized - * @param blobContainer blob container - * @param blobName blob name - */ - protected void writeBlob(T obj, BlobContainer blobContainer, String blobName) throws IOException { - BytesReference bytes = write(obj); - try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) { + private void writeTo(final T obj, final String blobName, final CheckedConsumer consumer) throws IOException { + final BytesReference bytes = write(obj); + try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) { final String resourceDesc = "ChecksumBlobStoreFormat.writeBlob(blob=\"" + blobName + "\")"; - try (OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(resourceDesc, blobName, byteArrayOutputStream, BUFFER_SIZE)) { + try (OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(resourceDesc, blobName, outputStream, BUFFER_SIZE)) { CodecUtil.writeHeader(indexOutput, codec, VERSION); try (OutputStream indexOutputOutputStream = new IndexOutputOutputStream(indexOutput) { @Override public void close() throws IOException { // this is important since some of the XContentBuilders write bytes on close. // in order to write the footer we need to prevent closing the actual index input. - } }) { + } + }) { bytes.writeTo(indexOutputOutputStream); } CodecUtil.writeFooter(indexOutput); } - BytesArray bytesArray = new BytesArray(byteArrayOutputStream.toByteArray()); - try (InputStream stream = bytesArray.streamInput()) { - blobContainer.writeBlob(blobName, stream, bytesArray.length()); - } + consumer.accept(new BytesArray(outputStream.toByteArray())); } } - /** - * Returns true if the blob is a leftover temporary blob. - * - * The temporary blobs might be left after failed atomic write operation. - */ - public boolean isTempBlobName(String blobName) { - return blobName.startsWith(ChecksumBlobStoreFormat.TEMP_FILE_PREFIX); - } - protected BytesReference write(T obj) throws IOException { try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) { if (compress) { @@ -222,10 +197,4 @@ public class ChecksumBlobStoreFormat extends BlobStoreForm builder.endObject(); } } - - - protected String tempBlobName(String name) { - return TEMP_FILE_PREFIX + String.format(Locale.ROOT, blobNameFormat, name); - } - } diff --git a/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobContainerTests.java b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobContainerTests.java new file mode 100644 index 00000000000..c603eda906c --- /dev/null +++ b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobContainerTests.java @@ -0,0 +1,40 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.common.blobstore.fs; + +import org.elasticsearch.test.ESTestCase; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.startsWith; + +public class FsBlobContainerTests extends ESTestCase { + + public void testTempBlobName() { + final String blobName = randomAlphaOfLengthBetween(1, 20); + final String tempBlobName = FsBlobContainer.tempBlobName(blobName); + assertThat(tempBlobName, startsWith("pending-")); + assertThat(tempBlobName, containsString(blobName)); + } + + public void testIsTempBlobName() { + final String tempBlobName = FsBlobContainer.tempBlobName(randomAlphaOfLengthBetween(1, 20)); + assertThat(FsBlobContainer.isTempBlobName(tempBlobName), is(true)); + } +} diff --git a/server/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreContainerTests.java b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreContainerTests.java similarity index 75% rename from server/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreContainerTests.java rename to server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreContainerTests.java index b08b81db11a..9230cded82b 100644 --- a/server/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreContainerTests.java +++ b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreContainerTests.java @@ -16,23 +16,27 @@ * specific language governing permissions and limitations * under the License. */ -package org.elasticsearch.common.blobstore; +package org.elasticsearch.common.blobstore.fs; import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.common.blobstore.fs.FsBlobStore; +import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.repositories.ESBlobStoreContainerTestCase; import java.io.IOException; -import java.nio.file.Path; @LuceneTestCase.SuppressFileSystems("ExtrasFS") public class FsBlobStoreContainerTests extends ESBlobStoreContainerTestCase { + protected BlobStore newBlobStore() throws IOException { - Path tempDir = createTempDir(); - Settings settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build(); - return new FsBlobStore(settings, tempDir); + final Settings settings; + if (randomBoolean()) { + settings = Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build(); + } else { + settings = Settings.EMPTY; + } + return new FsBlobStore(settings, createTempDir()); } } diff --git a/server/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java similarity index 84% rename from server/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java rename to server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java index 8b9021cae93..59e4ffd7927 100644 --- a/server/src/test/java/org/elasticsearch/common/blobstore/FsBlobStoreTests.java +++ b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobStoreTests.java @@ -16,10 +16,12 @@ * specific language governing permissions and limitations * under the License. 
*/ -package org.elasticsearch.common.blobstore; +package org.elasticsearch.common.blobstore.fs; import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.common.blobstore.fs.FsBlobStore; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -32,10 +34,15 @@ import java.nio.file.Path; @LuceneTestCase.SuppressFileSystems("ExtrasFS") public class FsBlobStoreTests extends ESBlobStoreTestCase { + protected BlobStore newBlobStore() throws IOException { - Path tempDir = createTempDir(); - Settings settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build(); - return new FsBlobStore(settings, tempDir); + final Settings settings; + if (randomBoolean()) { + settings = Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build(); + } else { + settings = Settings.EMPTY; + } + return new FsBlobStore(settings, createTempDir()); } public void testReadOnly() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index 45110ee6a2d..23c56688e00 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/server/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -94,7 +94,7 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase { } Thread.sleep(100); } - fail("Timeout!!!"); + fail("Timeout waiting for node [" + node + "] to be blocked"); } public SnapshotInfo waitForCompletion(String repository, String snapshotName, TimeValue timeout) throws InterruptedException { diff --git a/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java b/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java index 65926234d45..70be72989cf 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/BlobStoreFormatIT.java @@ -224,52 +224,16 @@ public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase { IOException writeBlobException = expectThrows(IOException.class, () -> { BlobContainer wrapper = new BlobContainerWrapper(blobContainer) { @Override - public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { - throw new IOException("Exception thrown in writeBlob() for " + blobName); + public void writeBlobAtomic(String blobName, InputStream inputStream, long blobSize) throws IOException { + throw new IOException("Exception thrown in writeBlobAtomic() for " + blobName); } }; checksumFormat.writeAtomic(blobObj, wrapper, name); }); - assertEquals("Exception thrown in writeBlob() for pending-" + name, writeBlobException.getMessage()); + assertEquals("Exception thrown in writeBlobAtomic() for " + name, writeBlobException.getMessage()); assertEquals(0, writeBlobException.getSuppressed().length); } - { - IOException moveException = expectThrows(IOException.class, () -> { - BlobContainer wrapper = new BlobContainerWrapper(blobContainer) { - @Override - public void move(String sourceBlobName, String targetBlobName) throws 
IOException { - throw new IOException("Exception thrown in move() for " + sourceBlobName); - } - }; - checksumFormat.writeAtomic(blobObj, wrapper, name); - }); - assertEquals("Exception thrown in move() for pending-" + name, moveException.getMessage()); - assertEquals(0, moveException.getSuppressed().length); - } - { - IOException moveThenDeleteException = expectThrows(IOException.class, () -> { - BlobContainer wrapper = new BlobContainerWrapper(blobContainer) { - @Override - public void move(String sourceBlobName, String targetBlobName) throws IOException { - throw new IOException("Exception thrown in move() for " + sourceBlobName); - } - - @Override - public void deleteBlob(String blobName) throws IOException { - throw new IOException("Exception thrown in deleteBlob() for " + blobName); - } - }; - checksumFormat.writeAtomic(blobObj, wrapper, name); - }); - - assertEquals("Exception thrown in move() for pending-" + name, moveThenDeleteException.getMessage()); - assertEquals(1, moveThenDeleteException.getSuppressed().length); - - final Throwable suppressedThrowable = moveThenDeleteException.getSuppressed()[0]; - assertTrue(suppressedThrowable instanceof IOException); - assertEquals("Exception thrown in deleteBlob() for pending-" + name, suppressedThrowable.getMessage()); - } } protected BlobStore createTestBlobStore() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java index 56a4a279cab..089955d140f 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java @@ -53,11 +53,21 @@ public class BlobContainerWrapper implements BlobContainer { delegate.writeBlob(blobName, inputStream, blobSize); } + @Override + public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException { + delegate.writeBlobAtomic(blobName, inputStream, blobSize); + } + @Override public void deleteBlob(String blobName) throws IOException { delegate.deleteBlob(blobName); } + @Override + public void deleteBlobIgnoringIfNotExists(final String blobName) throws IOException { + delegate.deleteBlobIgnoringIfNotExists(blobName); + } + @Override public Map listBlobs() throws IOException { return delegate.listBlobs(); diff --git a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java index 3a5b068cd89..5fa884adbfe 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java +++ b/server/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java @@ -19,6 +19,28 @@ package org.elasticsearch.snapshots.mockstore; +import com.carrotsearch.randomizedtesting.RandomizedContext; +import org.apache.lucene.index.CorruptIndexException; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobMetaData; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.common.blobstore.fs.FsBlobContainer; +import org.elasticsearch.common.io.PathUtils; +import 
org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.snapshots.SnapshotId; + import java.io.IOException; import java.io.InputStream; import java.io.UnsupportedEncodingException; @@ -29,31 +51,11 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Random; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.atomic.AtomicLong; -import com.carrotsearch.randomizedtesting.RandomizedContext; -import org.apache.lucene.index.CorruptIndexException; -import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.cluster.metadata.MetaData; -import org.elasticsearch.cluster.metadata.RepositoryMetaData; -import org.elasticsearch.common.blobstore.BlobContainer; -import org.elasticsearch.common.blobstore.BlobMetaData; -import org.elasticsearch.common.blobstore.BlobPath; -import org.elasticsearch.common.blobstore.BlobStore; -import org.elasticsearch.common.io.PathUtils; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Setting.Property; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.NamedXContentRegistry; -import org.elasticsearch.env.Environment; -import org.elasticsearch.plugins.RepositoryPlugin; -import org.elasticsearch.repositories.Repository; -import org.elasticsearch.repositories.IndexId; -import org.elasticsearch.repositories.fs.FsRepository; -import org.elasticsearch.snapshots.SnapshotId; - public class MockRepository extends FsRepository { public static class Plugin extends org.elasticsearch.plugins.Plugin implements RepositoryPlugin { @@ -325,6 +327,12 @@ public class MockRepository extends FsRepository { super.deleteBlob(blobName); } + @Override + public void deleteBlobIgnoringIfNotExists(String blobName) throws IOException { + maybeIOExceptionOrBlock(blobName); + super.deleteBlobIgnoringIfNotExists(blobName); + } + @Override public Map listBlobs() throws IOException { maybeIOExceptionOrBlock(""); @@ -365,6 +373,31 @@ public class MockRepository extends FsRepository { maybeIOExceptionOrBlock(blobName); } } + + @Override + public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException { + final Random random = RandomizedContext.current().getRandom(); + if (random.nextBoolean()) { + if ((delegate() instanceof FsBlobContainer) && (random.nextBoolean())) { + // Simulate a failure between the write and move operation in FsBlobContainer + final String tempBlobName = FsBlobContainer.tempBlobName(blobName); + super.writeBlob(tempBlobName, inputStream, blobSize); + maybeIOExceptionOrBlock(blobName); + final FsBlobContainer fsBlobContainer = (FsBlobContainer) delegate(); + fsBlobContainer.move(tempBlobName, blobName); + } else { + // Atomic write since it is potentially supported + // by the delegating blob container + maybeIOExceptionOrBlock(blobName); + super.writeBlobAtomic(blobName, inputStream, blobSize); + } + } else { + // Simulate a non-atomic write since many blob 
container + // implementations does not support atomic write + maybeIOExceptionOrBlock(blobName); + super.writeBlob(blobName, inputStream, blobSize); + } + } } } } diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java index 743be6d1bcb..df2024de445 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/ESBlobStoreContainerTestCase.java @@ -158,7 +158,11 @@ public abstract class ESBlobStoreContainerTestCase extends ESTestCase { protected void writeBlob(final BlobContainer container, final String blobName, final BytesArray bytesArray) throws IOException { try (InputStream stream = bytesArray.streamInput()) { - container.writeBlob(blobName, stream, bytesArray.length()); + if (randomBoolean()) { + container.writeBlob(blobName, stream, bytesArray.length()); + } else { + container.writeBlobAtomic(blobName, stream, bytesArray.length()); + } } } From 3b98c26d03799239178752a8adf12c5b8386a5b8 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Tue, 5 Jun 2018 13:43:04 +0200 Subject: [PATCH 3/8] Only auto-update license signature if all nodes ready (#30859) Allows rolling restart from 6.3 to 6.4. Relates to #30731 and #30251 --- .../elasticsearch/license/LicenseService.java | 3 ++- .../elasticsearch/license/LicenseUtils.java | 25 +++++++++++++++++-- .../license/SelfGeneratedLicense.java | 5 ++-- .../license/StartBasicClusterTask.java | 2 +- .../license/StartTrialClusterTask.java | 2 +- .../StartupSelfGeneratedLicenseTask.java | 8 +++--- .../license/LicenseRegistrationTests.java | 4 +-- .../license/LicenseSerializationTests.java | 2 +- .../LicensesMetaDataSerializationTests.java | 2 +- .../license/SelfGeneratedLicenseTests.java | 7 +++--- .../xpack/security/Security.java | 17 ++++++++++--- .../xpack/security/SecurityTests.java | 16 +++++++++++- 12 files changed, 70 insertions(+), 23 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java index 99e6a10ad92..40c694cedb7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseService.java @@ -411,7 +411,8 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste // auto-generate license if no licenses ever existed or if the current license is basic and // needs extended or if the license signature needs to be updated. 
this will trigger a subsequent cluster changed event if (currentClusterState.getNodes().isLocalNodeElectedMaster() && - (noLicense || LicenseUtils.licenseNeedsExtended(currentLicense) || LicenseUtils.signatureNeedsUpdate(currentLicense))) { + (noLicense || LicenseUtils.licenseNeedsExtended(currentLicense) || + LicenseUtils.signatureNeedsUpdate(currentLicense, currentClusterState.nodes()))) { registerOrUpdateSelfGeneratedLicense(); } } else if (logger.isDebugEnabled()) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java index 8fcdc05bcf9..4c8a558682b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicenseUtils.java @@ -6,8 +6,12 @@ package org.elasticsearch.license; import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.rest.RestStatus; +import java.util.stream.StreamSupport; + public class LicenseUtils { public static final String EXPIRED_FEATURE_METADATA = "es.license.expired.feature"; @@ -42,8 +46,25 @@ public class LicenseUtils { * Checks if the signature of a self generated license with older version needs to be * recreated with the new key */ - public static boolean signatureNeedsUpdate(License license) { + public static boolean signatureNeedsUpdate(License license, DiscoveryNodes currentNodes) { + assert License.VERSION_CRYPTO_ALGORITHMS == License.VERSION_CURRENT : "update this method when adding a new version"; + return ("basic".equals(license.type()) || "trial".equals(license.type())) && - (license.version() < License.VERSION_CRYPTO_ALGORITHMS); + // only upgrade signature when all nodes are ready to deserialize the new signature + (license.version() < License.VERSION_CRYPTO_ALGORITHMS && + compatibleLicenseVersion(currentNodes) == License.VERSION_CRYPTO_ALGORITHMS + ); + } + + public static int compatibleLicenseVersion(DiscoveryNodes currentNodes) { + assert License.VERSION_CRYPTO_ALGORITHMS == License.VERSION_CURRENT : "update this method when adding a new version"; + + if (StreamSupport.stream(currentNodes.spliterator(), false) + .allMatch(node -> node.getVersion().onOrAfter(Version.V_6_4_0))) { + // License.VERSION_CRYPTO_ALGORITHMS was introduced in 6.4.0 + return License.VERSION_CRYPTO_ALGORITHMS; + } else { + return License.VERSION_START_DATE; + } } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java index 0bc49d517cd..fb9b167d3db 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/SelfGeneratedLicense.java @@ -5,6 +5,7 @@ */ package org.elasticsearch.license; +import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -26,8 +27,8 @@ import static org.elasticsearch.license.CryptUtils.decrypt; class SelfGeneratedLicense { - public static License create(License.Builder specBuilder) { - return create(specBuilder, License.VERSION_CURRENT); + public static License create(License.Builder specBuilder, DiscoveryNodes 
currentNodes) { + return create(specBuilder, LicenseUtils.compatibleLicenseVersion(currentNodes)); } public static License create(License.Builder specBuilder, int version) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java index 0cf949a6990..468f1799a07 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java @@ -73,7 +73,7 @@ public class StartBasicClusterTask extends ClusterStateUpdateTask { .issueDate(issueDate) .type("basic") .expiryDate(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS); - License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder); + License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder, currentState.nodes()); if (request.isAcknowledged() == false && currentLicense != null) { Map ackMessages = LicenseService.getAckMessages(selfGeneratedLicense, currentLicense); if (ackMessages.isEmpty() == false) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java index 5c5c03151ba..2bf0555fde1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java @@ -82,7 +82,7 @@ public class StartTrialClusterTask extends ClusterStateUpdateTask { .issueDate(issueDate) .type(request.getType()) .expiryDate(expiryDate); - License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder); + License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder, currentState.nodes()); LicensesMetaData newLicensesMetaData = new LicensesMetaData(selfGeneratedLicense, Version.CURRENT); mdBuilder.putCustom(LicensesMetaData.TYPE, newLicensesMetaData); return ClusterState.builder(currentState).metaData(mdBuilder).build(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java index 13d6326f3ce..c2d53bd0716 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java @@ -61,7 +61,7 @@ public class StartupSelfGeneratedLicenseTask extends ClusterStateUpdateTask { "]. 
Must be trial or basic."); } return updateWithLicense(currentState, type); - } else if (LicenseUtils.signatureNeedsUpdate(currentLicensesMetaData.getLicense())) { + } else if (LicenseUtils.signatureNeedsUpdate(currentLicensesMetaData.getLicense(), currentState.nodes())) { return updateLicenseSignature(currentState, currentLicensesMetaData); } else if (LicenseUtils.licenseNeedsExtended(currentLicensesMetaData.getLicense())) { return extendBasic(currentState, currentLicensesMetaData); @@ -87,7 +87,7 @@ public class StartupSelfGeneratedLicenseTask extends ClusterStateUpdateTask { .issueDate(issueDate) .type(type) .expiryDate(expiryDate); - License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder); + License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder, currentState.nodes()); Version trialVersion = currentLicenseMetaData.getMostRecentTrialVersion(); LicensesMetaData newLicenseMetadata = new LicensesMetaData(selfGeneratedLicense, trialVersion); mdBuilder.putCustom(LicensesMetaData.TYPE, newLicenseMetadata); @@ -120,7 +120,7 @@ public class StartupSelfGeneratedLicenseTask extends ClusterStateUpdateTask { .issueDate(currentLicense.issueDate()) .type("basic") .expiryDate(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS); - License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder); + License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder, currentLicense.version()); Version trialVersion = currentLicenseMetadata.getMostRecentTrialVersion(); return new LicensesMetaData(selfGeneratedLicense, trialVersion); } @@ -141,7 +141,7 @@ public class StartupSelfGeneratedLicenseTask extends ClusterStateUpdateTask { .issueDate(issueDate) .type(type) .expiryDate(expiryDate); - License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder); + License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder, currentState.nodes()); LicensesMetaData licensesMetaData; if ("trial".equals(type)) { licensesMetaData = new LicensesMetaData(selfGeneratedLicense, Version.CURRENT); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseRegistrationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseRegistrationTests.java index 2a237f090e2..5405af013af 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseRegistrationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseRegistrationTests.java @@ -104,7 +104,7 @@ public class LicenseRegistrationTests extends AbstractLicenseServiceTestCase { .issueDate(dateMath("now-10h", now)) .type("basic") .expiryDate(dateMath("now-2h", now)); - License license = SelfGeneratedLicense.create(builder); + License license = SelfGeneratedLicense.create(builder, License.VERSION_CURRENT); XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY); setInitialState(license, licenseState, Settings.EMPTY); @@ -125,4 +125,4 @@ public class LicenseRegistrationTests extends AbstractLicenseServiceTestCase { assertEquals(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS, licenseMetaData.getLicense().expiryDate()); assertEquals(uid, licenseMetaData.getLicense().uid()); } -} \ No newline at end of file +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseSerializationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseSerializationTests.java index d7cf5ab50fb..d07be0fd3c7 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseSerializationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseSerializationTests.java @@ -111,7 +111,7 @@ public class LicenseSerializationTests extends ESTestCase { .issueDate(now) .type("basic") .expiryDate(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS); - License license = SelfGeneratedLicense.create(specBuilder); + License license = SelfGeneratedLicense.create(specBuilder, License.VERSION_CURRENT); XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON); license.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(License.REST_VIEW_MODE, "true"))); builder.flush(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java index f3ed04ed22d..d7799959f6c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetaDataSerializationTests.java @@ -95,7 +95,7 @@ public class LicensesMetaDataSerializationTests extends ESTestCase { .issueDate(issueDate) .type(randomBoolean() ? "trial" : "basic") .expiryDate(issueDate + TimeValue.timeValueHours(2).getMillis()); - final License trialLicense = SelfGeneratedLicense.create(specBuilder); + final License trialLicense = SelfGeneratedLicense.create(specBuilder, License.VERSION_CURRENT); LicensesMetaData licensesMetaData = new LicensesMetaData(trialLicense, Version.CURRENT); XContentBuilder builder = XContentFactory.jsonBuilder(); builder.startObject(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/SelfGeneratedLicenseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/SelfGeneratedLicenseTests.java index aa27dbdcb49..4e061623ccd 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/SelfGeneratedLicenseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/SelfGeneratedLicenseTests.java @@ -34,7 +34,7 @@ public class SelfGeneratedLicenseTests extends ESTestCase { .type(randomBoolean() ? 
"trial" : "basic") .issueDate(issueDate) .expiryDate(issueDate + TimeValue.timeValueHours(2).getMillis()); - License trialLicense = SelfGeneratedLicense.create(specBuilder); + License trialLicense = SelfGeneratedLicense.create(specBuilder, License.VERSION_CURRENT); assertThat(SelfGeneratedLicense.verify(trialLicense), equalTo(true)); } @@ -47,7 +47,7 @@ public class SelfGeneratedLicenseTests extends ESTestCase { .maxNodes(5) .issueDate(issueDate) .expiryDate(issueDate + TimeValue.timeValueHours(2).getMillis()); - License trialLicense = SelfGeneratedLicense.create(specBuilder); + License trialLicense = SelfGeneratedLicense.create(specBuilder, License.VERSION_CURRENT); final String originalSignature = trialLicense.signature(); License tamperedLicense = License.builder().fromLicenseSpec(trialLicense, originalSignature) .expiryDate(System.currentTimeMillis() + TimeValue.timeValueHours(5).getMillis()) @@ -70,7 +70,8 @@ public class SelfGeneratedLicenseTests extends ESTestCase { .issueDate(issueDate) .expiryDate(issueDate + TimeValue.timeValueHours(2).getMillis()); License pre20TrialLicense = specBuilder.build(); - License license = SelfGeneratedLicense.create(License.builder().fromPre20LicenseSpec(pre20TrialLicense).type("trial")); + License license = SelfGeneratedLicense.create(License.builder().fromPre20LicenseSpec(pre20TrialLicense).type("trial"), + License.VERSION_CURRENT); assertThat(SelfGeneratedLicense.verify(license), equalTo(true)); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 6d12b6472f1..d2e35999096 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.security; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; @@ -17,7 +16,6 @@ import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; @@ -112,7 +110,6 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; import org.elasticsearch.xpack.core.security.authc.DefaultAuthenticationFailureHandler; import org.elasticsearch.xpack.core.security.authc.Realm; import org.elasticsearch.xpack.core.security.authc.RealmSettings; -import org.elasticsearch.xpack.core.security.authc.TokenMetaData; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; @@ -934,7 +931,8 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw if (enabled) { return new ValidateTLSOnJoin(XPackSettings.TRANSPORT_SSL_ENABLED.get(settings), DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings)) - .andThen(new 
ValidateUpgradedSecurityIndex()); + .andThen(new ValidateUpgradedSecurityIndex()) + .andThen(new ValidateLicenseCanBeDeserialized()); } return null; } @@ -971,6 +969,17 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw } } + static final class ValidateLicenseCanBeDeserialized implements BiConsumer { + @Override + public void accept(DiscoveryNode node, ClusterState state) { + License license = LicenseService.getLicense(state.metaData()); + if (license != null && license.version() >= License.VERSION_CRYPTO_ALGORITHMS && node.getVersion().before(Version.V_6_4_0)) { + throw new IllegalStateException("node " + node + " is on version [" + node.getVersion() + + "] that cannot deserialize the license format [" + license.version() + "], upgrade node to at least 6.4.0"); + } + } + } + @Override public void reloadSPI(ClassLoader loader) { securityExtensions.addAll(SecurityExtension.loadExtensions(loader)); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 190c8703955..b1d8d4b67bf 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.license.TestUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.VersionUtils; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.XPackSettings; @@ -278,6 +279,19 @@ public class SecurityTests extends ESTestCase { } } + public void testJoinValidatorForLicenseDeserialization() throws Exception { + DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(), + VersionUtils.randomVersionBetween(random(), null, Version.V_6_3_0)); + MetaData.Builder builder = MetaData.builder(); + License license = TestUtils.generateSignedLicense(null, + randomIntBetween(License.VERSION_CRYPTO_ALGORITHMS, License.VERSION_CURRENT), -1, TimeValue.timeValueHours(24)); + TestUtils.putLicense(builder, license); + ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(builder.build()).build(); + IllegalStateException e = expectThrows(IllegalStateException.class, + () -> new Security.ValidateLicenseCanBeDeserialized().accept(node, state)); + assertThat(e.getMessage(), containsString("cannot deserialize the license format")); + } + public void testIndexJoinValidator_Old_And_Rolling() throws Exception { createComponents(Settings.EMPTY); BiConsumer joinValidator = security.getJoinValidator(); @@ -345,7 +359,7 @@ public class SecurityTests extends ESTestCase { .nodes(discoveryNodes).build(); joinValidator.accept(node, clusterState); } - + public void testGetFieldFilterSecurityEnabled() throws Exception { createComponents(Settings.EMPTY); Function> fieldFilter = security.getFieldFilter(); From 1af6d20efe83722515a0c449cdb2e8e7288686f5 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Tue, 5 Jun 2018 14:54:24 +0200 Subject: [PATCH 4/8] Fix docs build. 
--- docs/reference/query-dsl/query-string-syntax.asciidoc | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/reference/query-dsl/query-string-syntax.asciidoc b/docs/reference/query-dsl/query-string-syntax.asciidoc index 937fcdae5fe..765b54b5883 100644 --- a/docs/reference/query-dsl/query-string-syntax.asciidoc +++ b/docs/reference/query-dsl/query-string-syntax.asciidoc @@ -254,7 +254,6 @@ would look like this: } } -**** ===== Grouping From 4b893c190068d6cf83a8f341c3f9ca535fb5738e Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 5 Jun 2018 09:02:13 -0400 Subject: [PATCH 5/8] TEST: Retry synced-flush if ongoing ops on primary (#30978) When the last indexing operation completes, we fire a global checkpoint sync. Since a global checkpoint sync request is a replication request, it acquires an index shard permit on the primary when executing. If this happens while we are issuing the synced-flush, the synced-flush request will fail as it thinks there are in-flight operations. We can avoid this situation by retrying the synced-flush if the current request fails due to ongoing operations on the primary. Closes #29392 --- .../indices/flush/SyncedFlushService.java | 12 ------ .../elasticsearch/indices/flush/FlushIT.java | 22 +---------- .../indices/flush/SyncedFlushUtil.java | 37 +++++++++++++------ 3 files changed, 26 insertions(+), 45 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java index 52e0ac8ab86..6ef6c1546d1 100644 --- a/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java +++ b/server/src/main/java/org/elasticsearch/indices/flush/SyncedFlushService.java @@ -19,7 +19,6 @@ package org.elasticsearch.indices.flush; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.elasticsearch.Assertions; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -502,18 +501,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL if (indexShard.routingEntry().primary() == false) { throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard"); } - if (Assertions.ENABLED) { - if (logger.isTraceEnabled()) { - logger.trace("in flight operations {}, acquirers {}", indexShard.getActiveOperationsCount(), indexShard.getActiveOperations()); - } - } int opCount = indexShard.getActiveOperationsCount(); - // Need to snapshot the debug info twice as it's updated concurrently with the permit count.
- if (Assertions.ENABLED) { - if (logger.isTraceEnabled()) { - logger.trace("in flight operations {}, acquirers {}", indexShard.getActiveOperationsCount(), indexShard.getActiveOperations()); - } - } return new InFlightOpsResponse(opCount); } diff --git a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java index 94bd8e80898..a543e87adcb 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/FlushIT.java @@ -46,16 +46,13 @@ import org.elasticsearch.index.shard.IndexShardTestCase; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndicesService; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import java.io.IOException; import java.util.Arrays; import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; @@ -103,7 +100,7 @@ public class FlushIT extends ESIntegTestCase { } } - public void testSyncedFlush() throws ExecutionException, InterruptedException, IOException { + public void testSyncedFlush() throws Exception { internalCluster().ensureAtLeastNumDataNodes(2); prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)).get(); ensureGreen(); @@ -246,16 +243,6 @@ public class FlushIT extends ESIntegTestCase { assertThat(indexResult.getFailure(), nullValue()); } - private String syncedFlushDescription(ShardsSyncedFlushResult result) { - String detail = result.shardResponses().entrySet().stream() - .map(e -> "Shard [" + e.getKey() + "], result [" + e.getValue() + "]") - .collect(Collectors.joining(",")); - return String.format(Locale.ROOT, "Total shards: [%d], failed: [%s], reason: [%s], detail: [%s]", - result.totalShards(), result.failed(), result.failureReason(), detail); - } - - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29392") - @TestLogging("_root:DEBUG,org.elasticsearch.indices.flush:TRACE") public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception { internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); final int numberOfReplicas = internalCluster().numDataNodes() - 1; @@ -281,7 +268,6 @@ public class FlushIT extends ESIntegTestCase { indexDoc(IndexShardTestCase.getEngine(outOfSyncReplica), "extra_" + i); } final ShardsSyncedFlushResult partialResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - logger.info("Partial seal: {}", syncedFlushDescription(partialResult)); assertThat(partialResult.totalShards(), equalTo(numberOfReplicas + 1)); assertThat(partialResult.successfulShards(), equalTo(numberOfReplicas)); assertThat(partialResult.shardResponses().get(outOfSyncReplica.routingEntry()).failureReason, equalTo( @@ -297,8 +283,6 @@ public class FlushIT extends ESIntegTestCase { assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1)); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29392") - @TestLogging("_root:DEBUG,org.elasticsearch.indices.flush:TRACE") public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception { internalCluster().ensureAtLeastNumDataNodes(between(2, 3)); final int 
numberOfReplicas = internalCluster().numDataNodes() - 1; @@ -315,11 +299,9 @@ public class FlushIT extends ESIntegTestCase { index("test", "doc", Integer.toString(i)); } final ShardsSyncedFlushResult firstSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - logger.info("First seal: {}", syncedFlushDescription(firstSeal)); assertThat(firstSeal.successfulShards(), equalTo(numberOfReplicas + 1)); // Do not renew synced-flush final ShardsSyncedFlushResult secondSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - logger.info("Second seal: {}", syncedFlushDescription(secondSeal)); assertThat(secondSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(secondSeal.syncId(), equalTo(firstSeal.syncId())); // Shards were updated, renew synced flush. @@ -328,7 +310,6 @@ public class FlushIT extends ESIntegTestCase { index("test", "doc", Integer.toString(i)); } final ShardsSyncedFlushResult thirdSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - logger.info("Third seal: {}", syncedFlushDescription(thirdSeal)); assertThat(thirdSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(thirdSeal.syncId(), not(equalTo(firstSeal.syncId()))); // Manually remove or change sync-id, renew synced flush. @@ -344,7 +325,6 @@ public class FlushIT extends ESIntegTestCase { assertThat(shard.commitStats().syncId(), nullValue()); } final ShardsSyncedFlushResult forthSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId); - logger.info("Forth seal: {}", syncedFlushDescription(forthSeal)); assertThat(forthSeal.successfulShards(), equalTo(numberOfReplicas + 1)); assertThat(forthSeal.syncId(), not(equalTo(thirdSeal.syncId()))); } diff --git a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java index 987f69b6587..8a8d57295a5 100644 --- a/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java +++ b/server/src/test/java/org/elasticsearch/indices/flush/SyncedFlushUtil.java @@ -29,6 +29,9 @@ import org.elasticsearch.test.InternalTestCluster; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.test.ESTestCase.assertBusy; /** Utils for SyncedFlush */ public class SyncedFlushUtil { @@ -40,21 +43,31 @@ public class SyncedFlushUtil { /** * Blocking version of {@link SyncedFlushService#attemptSyncedFlush(ShardId, ActionListener)} */ - public static ShardsSyncedFlushResult attemptSyncedFlush(Logger logger, InternalTestCluster cluster, ShardId shardId) { + public static ShardsSyncedFlushResult attemptSyncedFlush(Logger logger, InternalTestCluster cluster, ShardId shardId) throws Exception { + /* + * When the last indexing operation is completed, we will fire a global checkpoint sync. + * Since a global checkpoint sync request is a replication request, it will acquire an index + * shard permit on the primary when executing. If this happens at the same time while we are + * issuing the synced-flush, the synced-flush request will fail as it thinks there are + * in-flight operations. We can avoid such situation by continuing issuing another synced-flush + * if the synced-flush failed due to the ongoing operations on the primary. 
+ */ SyncedFlushService service = cluster.getInstance(SyncedFlushService.class); - logger.debug("Issue synced-flush on node [{}], shard [{}], cluster state [{}]", - service.nodeName(), shardId, cluster.clusterService(service.nodeName()).state()); - LatchedListener listener = new LatchedListener<>(); - service.attemptSyncedFlush(shardId, listener); - try { + AtomicReference> listenerHolder = new AtomicReference<>(); + assertBusy(() -> { + LatchedListener listener = new LatchedListener<>(); + listenerHolder.set(listener); + service.attemptSyncedFlush(shardId, listener); listener.latch.await(); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); + if (listener.result != null && listener.result.failureReason() != null + && listener.result.failureReason().contains("ongoing operations on primary")) { + throw new AssertionError(listener.result.failureReason()); // cause the assert busy to retry + } + }); + if (listenerHolder.get().error != null) { + throw ExceptionsHelper.convertToElastic(listenerHolder.get().error); } - if (listener.error != null) { - throw ExceptionsHelper.convertToElastic(listener.error); - } - return listener.result; + return listenerHolder.get().result; } public static final class LatchedListener implements ActionListener { From 4624ba5e100e19bb624e0b72e6b55eb171d6fef4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 5 Jun 2018 15:29:00 +0200 Subject: [PATCH 6/8] [Tests] Muting RatedRequestsTests#testXContentParsingIsNotLenient --- .../org/elasticsearch/index/rankeval/RatedRequestsTests.java | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java index 084f29b8c9a..1be1acb1317 100644 --- a/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java +++ b/modules/rank-eval/src/test/java/org/elasticsearch/index/rankeval/RatedRequestsTests.java @@ -131,6 +131,7 @@ public class RatedRequestsTests extends ESTestCase { } } + @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/31104") public void testXContentParsingIsNotLenient() throws IOException { RatedRequest testItem = createTestItem(randomBoolean()); XContentType xContentType = randomFrom(XContentType.values()); From 81172c0c31e99cbad073471a0595d63b80d6dbe4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christoph=20B=C3=BCscher?= Date: Tue, 5 Jun 2018 15:47:53 +0200 Subject: [PATCH 7/8] Adapt bwc versions after backporting #30983 to 6.4 --- .../action/admin/cluster/reroute/ClusterRerouteResponse.java | 4 ++-- .../cluster/settings/ClusterUpdateSettingsResponse.java | 4 ++-- .../action/admin/indices/rollover/RolloverResponse.java | 4 ++-- .../cluster/settings/ClusterUpdateSettingsResponseTests.java | 2 +- .../action/admin/indices/rollover/RolloverResponseTests.java | 5 ++++- 5 files changed, 11 insertions(+), 8 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java index 792f2135e78..3c35b977024 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponse.java @@ -63,7 +63,7 @@ public class ClusterRerouteResponse extends AcknowledgedResponse implements ToXC 
@Override public void readFrom(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { super.readFrom(in); state = ClusterState.readFrom(in, null); explanations = RoutingExplanations.readFrom(in); @@ -76,7 +76,7 @@ public class ClusterRerouteResponse extends AcknowledgedResponse implements ToXC @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { super.writeTo(out); state.writeTo(out); RoutingExplanations.writeTo(explanations, out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java index 2691d5b5b09..cc29e60aa99 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponse.java @@ -68,7 +68,7 @@ public class ClusterUpdateSettingsResponse extends AcknowledgedResponse { @Override public void readFrom(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { super.readFrom(in); transientSettings = Settings.readSettingsFromStream(in); persistentSettings = Settings.readSettingsFromStream(in); @@ -89,7 +89,7 @@ public class ClusterUpdateSettingsResponse extends AcknowledgedResponse { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { super.writeTo(out); Settings.writeSettingsToStream(transientSettings, out); Settings.writeSettingsToStream(persistentSettings, out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java index 1342e62c652..2d699591192 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java @@ -115,7 +115,7 @@ public final class RolloverResponse extends ShardsAcknowledgedResponse implement @Override public void readFrom(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (in.getVersion().onOrAfter(Version.V_6_4_0)) { super.readFrom(in); oldIndex = in.readString(); newIndex = in.readString(); @@ -144,7 +144,7 @@ public final class RolloverResponse extends ShardsAcknowledgedResponse implement @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) { + if (out.getVersion().onOrAfter(Version.V_6_4_0)) { super.writeTo(out); out.writeString(oldIndex); out.writeString(newIndex); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java index c15c0a1be7f..e8bd14b640d 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java +++ 
b/server/src/test/java/org/elasticsearch/action/admin/cluster/settings/ClusterUpdateSettingsResponseTests.java @@ -102,6 +102,6 @@ public class ClusterUpdateSettingsResponseTests extends AbstractStreamableXConte public void testOldSerialisation() throws IOException { ClusterUpdateSettingsResponse original = createTestInstance(); - assertSerialization(original, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_7_0_0_alpha1)); + assertSerialization(original, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_4_0)); } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java index c3ff4511815..8750eefc4d4 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.admin.indices.rollover; +import com.carrotsearch.randomizedtesting.annotations.Repeat; + import org.elasticsearch.Version; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentParser; @@ -130,8 +132,9 @@ public class RolloverResponseTests extends AbstractStreamableXContentTestCase Date: Tue, 5 Jun 2018 16:34:19 +0200 Subject: [PATCH 8/8] Removing erroneous repeat --- .../action/admin/indices/rollover/RolloverResponseTests.java | 1 - 1 file changed, 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java index 8750eefc4d4..903accac6ab 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponseTests.java @@ -132,7 +132,6 @@ public class RolloverResponseTests extends AbstractStreamableXContentTestCase
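The synced-flush change in PATCH 5/8 above boils down to one idea: if the flush fails only because the primary momentarily holds operation permits (for example, for a global checkpoint sync), issue it again rather than fail the test. The sketch below is a self-contained, hypothetical illustration of that retry loop; the names (FlushResult, flushWithRetry) are invented, and the real change instead relies on ESTestCase.assertBusy inside SyncedFlushUtil.attemptSyncedFlush.

import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

final class SyncedFlushRetryExample {

    /** Result of one synced-flush attempt; a null failureReason means the seal succeeded. */
    static final class FlushResult {
        final String failureReason;

        FlushResult(String failureReason) {
            this.failureReason = failureReason;
        }

        boolean failedDueToOngoingOps() {
            return failureReason != null && failureReason.contains("ongoing operations on primary");
        }
    }

    /**
     * Re-issues the flush while it fails only because of in-flight operations on the primary,
     * backing off briefly between attempts and giving up after maxAttempts tries.
     */
    static FlushResult flushWithRetry(Supplier<FlushResult> attempt, int maxAttempts) throws InterruptedException {
        FlushResult last = null;
        for (int i = 0; i < maxAttempts; i++) {
            last = attempt.get();
            if (last.failedDueToOngoingOps() == false) {
                return last; // success, or a failure that should be reported as-is
            }
            TimeUnit.MILLISECONDS.sleep(100); // transient condition: wait a little and retry
        }
        return last; // still busy after maxAttempts; surface the last failure to the caller
    }
}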
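PATCH 7/8 above moves the wire-format gates in ClusterRerouteResponse, ClusterUpdateSettingsResponse and RolloverResponse from Version.V_7_0_0_alpha1 down to Version.V_6_4_0, since the new serialization format was backported to the 6.4 branch and nodes from 6.4.0 onwards can read and write it. The sketch below illustrates that version-gated read/write pattern in isolation; it deliberately uses plain java.io streams and an invented integer version constant instead of Elasticsearch's StreamInput/StreamOutput and Version classes, so it is a simplified stand-in rather than the actual response code.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

final class VersionGatedResponseExample {

    /** Stand-in for the wire version of the node on the other side of the connection. */
    static final int WIRE_VERSION_6_4_0 = 64;

    private boolean acknowledged;
    private String newIndex; // field that only exists in the newer wire format

    /** Writes the newer, richer format only when the peer is recent enough to understand it. */
    void writeTo(DataOutput out, int peerWireVersion) throws IOException {
        out.writeBoolean(acknowledged);
        if (peerWireVersion >= WIRE_VERSION_6_4_0) {
            out.writeUTF(newIndex == null ? "" : newIndex);
        }
    }

    /** Mirrors writeTo: older peers never send the extra field, so it is left unset. */
    void readFrom(DataInput in, int peerWireVersion) throws IOException {
        acknowledged = in.readBoolean();
        if (peerWireVersion >= WIRE_VERSION_6_4_0) {
            newIndex = in.readUTF();
        } else {
            newIndex = null;
        }
    }
}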