Merge branch 'master' into ccr
* master:
  Removing erroneous repeat
  Adapt bwc versions after backporting #30983 to 6.4
  [Tests] Muting RatedRequestsTests#testXContentParsingIsNotLenient
  TEST: Retry synced-flush if ongoing ops on primary (#30978)
  Fix docs build.
  Only auto-update license signature if all nodes ready (#30859)
  Add BlobContainer.writeBlobAtomic() (#30902)
  Add a doc value format to binary fields. (#30860)
This commit is contained in: commit 2b5c0d77e9
@@ -505,8 +505,6 @@
   <suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]settings[/\\]ClusterSettingsIT.java" checks="LineLength" />
   <suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]shards[/\\]ClusterSearchShardsIT.java" checks="LineLength" />
   <suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]structure[/\\]RoutingIteratorTests.java" checks="LineLength" />
-  <suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]FsBlobStoreContainerTests.java" checks="LineLength" />
-  <suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]blobstore[/\\]FsBlobStoreTests.java" checks="LineLength" />
   <suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]breaker[/\\]MemoryCircuitBreakerTests.java" checks="LineLength" />
   <suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]geo[/\\]ShapeBuilderTests.java" checks="LineLength" />
   <suppress files="server[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]hash[/\\]MessageDigestsTests.java" checks="LineLength" />
@@ -254,7 +254,6 @@ would look like this:
 }
 }
 
-****
 
 ===== Grouping
 
@@ -131,6 +131,7 @@ public class RatedRequestsTests extends ESTestCase {
         }
     }
 
+    @AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/31104")
     public void testXContentParsingIsNotLenient() throws IOException {
         RatedRequest testItem = createTestItem(randomBoolean());
         XContentType xContentType = randomFrom(XContentType.values());
@@ -63,7 +63,7 @@ public class ClusterRerouteResponse extends AcknowledgedResponse implements ToXC
 
     @Override
     public void readFrom(StreamInput in) throws IOException {
-        if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
             super.readFrom(in);
             state = ClusterState.readFrom(in, null);
             explanations = RoutingExplanations.readFrom(in);
@@ -76,7 +76,7 @@ public class ClusterRerouteResponse extends AcknowledgedResponse implements ToXC
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
             super.writeTo(out);
             state.writeTo(out);
             RoutingExplanations.writeTo(explanations, out);
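These serialization gates follow the standard backport pattern: while the change from #30983 existed only on the 7.x line, the new wire format was guarded by V_7_0_0_alpha1; once it was backported to the 6.4 branch, the gate drops to V_6_4_0 so that any 6.4+ peer exchanges the new layout. ClusterUpdateSettingsResponse and RolloverResponse below apply the identical change. A minimal sketch of the pattern — ExampleResponse and its "details" field are hypothetical stand-ins, not from this commit:

```java
// Hedged sketch of the BWC gating used in the hunks above; ExampleResponse
// and the "details" field are illustrative, not part of the commit.
public class ExampleResponse extends AcknowledgedResponse {

    private String details;

    @Override
    public void readFrom(StreamInput in) throws IOException {
        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
            super.readFrom(in);        // new layout, understood by 6.4+ peers
            details = in.readString();
        } else {
            // older peers still speak the pre-backport layout (handling elided)
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
            super.writeTo(out);
            out.writeString(details);
        } else {
            // write the pre-backport layout for older peers (handling elided)
        }
    }
}
```

The matching test changes near the end of this diff shrink the randomized version range in testOldSerialisation for the same reason.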
@@ -68,7 +68,7 @@ public class ClusterUpdateSettingsResponse extends AcknowledgedResponse {
 
     @Override
     public void readFrom(StreamInput in) throws IOException {
-        if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
             super.readFrom(in);
             transientSettings = Settings.readSettingsFromStream(in);
             persistentSettings = Settings.readSettingsFromStream(in);
@@ -89,7 +89,7 @@ public class ClusterUpdateSettingsResponse extends AcknowledgedResponse {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
             super.writeTo(out);
             Settings.writeSettingsToStream(transientSettings, out);
             Settings.writeSettingsToStream(persistentSettings, out);
@@ -115,7 +115,7 @@ public final class RolloverResponse extends ShardsAcknowledgedResponse implement
 
     @Override
     public void readFrom(StreamInput in) throws IOException {
-        if (in.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (in.getVersion().onOrAfter(Version.V_6_4_0)) {
             super.readFrom(in);
             oldIndex = in.readString();
             newIndex = in.readString();
@@ -144,7 +144,7 @@ public final class RolloverResponse extends ShardsAcknowledgedResponse implement
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getVersion().onOrAfter(Version.V_7_0_0_alpha1)) {
+        if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
             super.writeTo(out);
             out.writeString(oldIndex);
             out.writeString(newIndex);
@@ -74,6 +74,29 @@ public interface BlobContainer {
      */
     void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException;
 
+    /**
+     * Reads blob content from the input stream and writes it to the container in a new blob with the given name,
+     * using an atomic write operation if the implementation supports it. When the BlobContainer implementation
+     * does not provide a specific implementation of writeBlobAtomic(String, InputStream, long), then
+     * the {@link #writeBlob(String, InputStream, long)} method is used.
+     *
+     * This method assumes the container does not already contain a blob of the same blobName. If a blob by the
+     * same name already exists, the operation will fail and an {@link IOException} will be thrown.
+     *
+     * @param   blobName
+     *          The name of the blob to write the contents of the input stream to.
+     * @param   inputStream
+     *          The input stream from which to retrieve the bytes to write to the blob.
+     * @param   blobSize
+     *          The size of the blob to be written, in bytes. It is implementation dependent whether
+     *          this value is used in writing the blob to the repository.
+     * @throws  FileAlreadyExistsException if a blob by the same name already exists
+     * @throws  IOException if the input stream could not be read, or the target blob could not be written to.
+     */
+    default void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException {
+        writeBlob(blobName, inputStream, blobSize);
+    }
+
     /**
      * Deletes a blob with giving name, if the blob exists. If the blob does not exist,
      * this method throws a NoSuchFileException.
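Because the new default method falls back to writeBlob, callers can target writeBlobAtomic unconditionally; containers that can do better (like the filesystem one in the next hunks) override it. A caller-side sketch, assuming `container` is a BlobContainer obtained from a repository's blob store:

```java
// Sketch: write a small marker blob atomically; "container" is an assumed
// BlobContainer instance, mirroring the repository verification code below.
BytesArray bytes = new BytesArray(new byte[] { 1, 2, 3 });
try (InputStream stream = bytes.streamInput()) {
    // atomic where the implementation supports it, otherwise equivalent to writeBlob
    container.writeBlobAtomic("master.dat", stream, bytes.length());
}
```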
@@ -19,11 +19,12 @@
 
 package org.elasticsearch.common.blobstore.fs;
 
-import org.elasticsearch.core.internal.io.IOUtils;
+import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
 import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
+import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.core.internal.io.Streams;
 
 import java.io.BufferedInputStream;
@@ -56,8 +57,9 @@ import static java.util.Collections.unmodifiableMap;
  */
 public class FsBlobContainer extends AbstractBlobContainer {
 
-    protected final FsBlobStore blobStore;
+    private static final String TEMP_FILE_PREFIX = "pending-";
 
+    protected final FsBlobStore blobStore;
     protected final Path path;
 
     public FsBlobContainer(FsBlobStore blobStore, BlobPath blobPath, Path path) {
@@ -131,6 +133,48 @@ public class FsBlobContainer extends AbstractBlobContainer {
         IOUtils.fsync(path, true);
     }
 
+    @Override
+    public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException {
+        final String tempBlob = tempBlobName(blobName);
+        final Path tempBlobPath = path.resolve(tempBlob);
+        try {
+            try (OutputStream outputStream = Files.newOutputStream(tempBlobPath, StandardOpenOption.CREATE_NEW)) {
+                Streams.copy(inputStream, outputStream);
+            }
+            IOUtils.fsync(tempBlobPath, false);
+
+            final Path blobPath = path.resolve(blobName);
+            // If the target file exists then Files.move() behaviour is implementation specific
+            // the existing file might be replaced or this method fails by throwing an IOException.
+            if (Files.exists(blobPath)) {
+                throw new FileAlreadyExistsException("blob [" + blobPath + "] already exists, cannot overwrite");
+            }
+            Files.move(tempBlobPath, blobPath, StandardCopyOption.ATOMIC_MOVE);
+        } catch (IOException ex) {
+            try {
+                deleteBlobIgnoringIfNotExists(tempBlob);
+            } catch (IOException e) {
+                ex.addSuppressed(e);
+            }
+            throw ex;
+        } finally {
+            IOUtils.fsync(path, true);
+        }
+    }
+
+    public static String tempBlobName(final String blobName) {
+        return "pending-" + blobName + "-" + UUIDs.randomBase64UUID();
+    }
+
+    /**
+     * Returns true if the blob is a leftover temporary blob.
+     *
+     * The temporary blobs might be left after failed atomic write operation.
+     */
+    public static boolean isTempBlobName(final String blobName) {
+        return blobName.startsWith(TEMP_FILE_PREFIX);
+    }
+
     @Override
     public void move(String source, String target) throws IOException {
         Path sourcePath = path.resolve(source);
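The override above is the classic temp-file recipe: stream into a uniquely named pending file created with CREATE_NEW, fsync it, refuse to overwrite an existing target, then rename with ATOMIC_MOVE, cleaning up the temp file on failure. The same recipe as a self-contained sketch in plain java.nio (the class name and the nanoTime-based suffix are illustrative; the Elasticsearch code uses a random UUID and Streams.copy):

```java
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;

public final class AtomicWriteSketch {

    // Write "source" to dir/name without ever exposing a half-written file.
    static void writeAtomically(Path dir, String name, InputStream source) throws IOException {
        Path temp = dir.resolve("pending-" + name + "-" + System.nanoTime()); // unique temp name
        try {
            // CREATE_NEW fails if the temp file already exists
            try (OutputStream out = Files.newOutputStream(temp, StandardOpenOption.CREATE_NEW)) {
                source.transferTo(out); // Java 9+; the ES code uses Streams.copy
            }
            // rename into place; atomic on POSIX filesystems, throws if unsupported
            Files.move(temp, dir.resolve(name), StandardCopyOption.ATOMIC_MOVE);
        } catch (IOException ex) {
            Files.deleteIfExists(temp); // best-effort cleanup of the leftover temp file
            throw ex;
        }
    }
}
```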
@@ -40,6 +40,8 @@ import org.elasticsearch.index.fielddata.IndexFieldData;
 import org.elasticsearch.index.fielddata.plain.BytesBinaryDVIndexFieldData;
 import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.query.QueryShardException;
+import org.elasticsearch.search.DocValueFormat;
+import org.joda.time.DateTimeZone;
 
 import java.io.IOException;
 import java.util.Base64;
@@ -104,6 +106,10 @@ public class BinaryFieldMapper extends FieldMapper {
         return CONTENT_TYPE;
     }
 
+    @Override
+    public DocValueFormat docValueFormat(String format, DateTimeZone timeZone) {
+        return DocValueFormat.BINARY;
+    }
 
     @Override
     public BytesReference valueForDisplay(Object value) {
@@ -507,18 +507,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
         if (indexShard.routingEntry().primary() == false) {
             throw new IllegalStateException("[" + request.shardId() +"] expected a primary shard");
         }
-        if (Assertions.ENABLED) {
-            if (logger.isTraceEnabled()) {
-                logger.trace("in flight operations {}, acquirers {}", indexShard.getActiveOperationsCount(), indexShard.getActiveOperations());
-            }
-        }
         int opCount = indexShard.getActiveOperationsCount();
-        // Need to snapshot the debug info twice as it's updated concurrently with the permit count.
-        if (Assertions.ENABLED) {
-            if (logger.isTraceEnabled()) {
-                logger.trace("in flight operations {}, acquirers {}", indexShard.getActiveOperationsCount(), indexShard.getActiveOperations());
-            }
-        }
         return new InFlightOpsResponse(opCount);
     }
 
@@ -50,6 +50,7 @@ import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.blobstore.fs.FsBlobContainer;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.Tuple;
@@ -555,10 +556,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                 String blobName = "master.dat";
                 BytesArray bytes = new BytesArray(testBytes);
                 try (InputStream stream = bytes.streamInput()) {
-                    testContainer.writeBlob(blobName + "-temp", stream, bytes.length());
+                    testContainer.writeBlobAtomic(blobName, stream, bytes.length());
                 }
-                // Make sure that move is supported
-                testContainer.move(blobName + "-temp", blobName);
                 return seed;
             }
         } catch (IOException exp) {
@@ -774,18 +773,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
     }
 
     private void writeAtomic(final String blobName, final BytesReference bytesRef) throws IOException {
-        final String tempBlobName = "pending-" + blobName + "-" + UUIDs.randomBase64UUID();
         try (InputStream stream = bytesRef.streamInput()) {
-            snapshotsBlobContainer.writeBlob(tempBlobName, stream, bytesRef.length());
-            snapshotsBlobContainer.move(tempBlobName, blobName);
-        } catch (IOException ex) {
-            // temporary blob creation or move failed - try cleaning up
-            try {
-                snapshotsBlobContainer.deleteBlobIgnoringIfNotExists(tempBlobName);
-            } catch (IOException e) {
-                ex.addSuppressed(e);
-            }
-            throw ex;
+            snapshotsBlobContainer.writeBlobAtomic(blobName, stream, bytesRef.length());
         }
     }
 
@@ -955,7 +944,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
         // Delete temporary index files first, as we might otherwise fail in the next step creating the new index file if an earlier
         // attempt to write an index file with this generation failed mid-way after creating the temporary file.
         for (final String blobName : blobs.keySet()) {
-            if (indexShardSnapshotsFormat.isTempBlobName(blobName)) {
+            if (FsBlobContainer.isTempBlobName(blobName)) {
                 try {
                     blobContainer.deleteBlobIgnoringIfNotExists(blobName);
                 } catch (IOException e) {
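With the temp-name logic centralized in FsBlobContainer, the cleanup loop above no longer depends on the blob format object. A reduced sketch of that sweep — the `blobs` map and `container` are assumed to come from BlobContainer#listBlobs and the repository:

```java
// Sketch: sweep leftovers from failed atomic writes; "blobs" and "container"
// are assumed inputs, mirroring the cleanup loop in the hunk above.
for (String blobName : blobs.keySet()) {
    if (FsBlobContainer.isTempBlobName(blobName)) {
        try {
            container.deleteBlobIgnoringIfNotExists(blobName);
        } catch (IOException e) {
            // best effort: a leftover temp blob is harmless and can be retried later
        }
    }
}
```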
@@ -23,6 +23,7 @@ import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.index.IndexFormatTooNewException;
 import org.apache.lucene.index.IndexFormatTooOldException;
 import org.apache.lucene.store.OutputStreamIndexOutput;
+import org.elasticsearch.common.CheckedConsumer;
 import org.elasticsearch.common.CheckedFunction;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.bytes.BytesArray;
@@ -52,8 +53,6 @@ import java.util.Locale;
  */
 public class ChecksumBlobStoreFormat<T extends ToXContent> extends BlobStoreFormat<T> {
 
-    private static final String TEMP_FILE_PREFIX = "pending-";
-
     private static final XContentType DEFAULT_X_CONTENT_TYPE = XContentType.SMILE;
 
     // The format version
@@ -120,7 +119,7 @@ public class ChecksumBlobStoreFormat<T extends ToXContent> extends BlobStoreForm
     }
 
     /**
-     * Writes blob in atomic manner with resolving the blob name using {@link #blobName} and {@link #tempBlobName} methods.
+     * Writes blob in atomic manner with resolving the blob name using {@link #blobName} method.
      * <p>
      * The blob will be compressed and checksum will be written if required.
      *
@@ -131,20 +130,12 @@ public class ChecksumBlobStoreFormat<T extends ToXContent> extends BlobStoreForm
      * @param name          blob name
      */
     public void writeAtomic(T obj, BlobContainer blobContainer, String name) throws IOException {
-        String blobName = blobName(name);
-        String tempBlobName = tempBlobName(name);
-        writeBlob(obj, blobContainer, tempBlobName);
-        try {
-            blobContainer.move(tempBlobName, blobName);
-        } catch (IOException ex) {
-            // Move failed - try cleaning up
-            try {
-                blobContainer.deleteBlob(tempBlobName);
-            } catch (Exception e) {
-                ex.addSuppressed(e);
-            }
-            throw ex;
-        }
+        final String blobName = blobName(name);
+        writeTo(obj, blobName, bytesArray -> {
+            try (InputStream stream = bytesArray.streamInput()) {
+                blobContainer.writeBlobAtomic(blobName, stream, bytesArray.length());
+            }
+        });
     }
 
     /**
@@ -157,51 +148,35 @@ public class ChecksumBlobStoreFormat<T extends ToXContent> extends BlobStoreForm
      * @param name          blob name
      */
     public void write(T obj, BlobContainer blobContainer, String name) throws IOException {
-        String blobName = blobName(name);
-        writeBlob(obj, blobContainer, blobName);
+        final String blobName = blobName(name);
+        writeTo(obj, blobName, bytesArray -> {
+            try (InputStream stream = bytesArray.streamInput()) {
+                blobContainer.writeBlob(blobName, stream, bytesArray.length());
+            }
+        });
     }
 
-    /**
-     * Writes blob in atomic manner without resolving the blobName using using {@link #blobName} method.
-     * <p>
-     * The blob will be compressed and checksum will be written if required.
-     *
-     * @param obj           object to be serialized
-     * @param blobContainer blob container
-     * @param blobName      blob name
-     */
-    protected void writeBlob(T obj, BlobContainer blobContainer, String blobName) throws IOException {
-        BytesReference bytes = write(obj);
-        try (ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream()) {
+    private void writeTo(final T obj, final String blobName, final CheckedConsumer<BytesArray, IOException> consumer) throws IOException {
+        final BytesReference bytes = write(obj);
+        try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {
             final String resourceDesc = "ChecksumBlobStoreFormat.writeBlob(blob=\"" + blobName + "\")";
-            try (OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(resourceDesc, blobName, byteArrayOutputStream, BUFFER_SIZE)) {
+            try (OutputStreamIndexOutput indexOutput = new OutputStreamIndexOutput(resourceDesc, blobName, outputStream, BUFFER_SIZE)) {
                 CodecUtil.writeHeader(indexOutput, codec, VERSION);
                 try (OutputStream indexOutputOutputStream = new IndexOutputOutputStream(indexOutput) {
                     @Override
                     public void close() throws IOException {
                         // this is important since some of the XContentBuilders write bytes on close.
                         // in order to write the footer we need to prevent closing the actual index input.
-                    } }) {
+                    }
+                }) {
                     bytes.writeTo(indexOutputOutputStream);
                 }
                 CodecUtil.writeFooter(indexOutput);
             }
-            BytesArray bytesArray = new BytesArray(byteArrayOutputStream.toByteArray());
-            try (InputStream stream = bytesArray.streamInput()) {
-                blobContainer.writeBlob(blobName, stream, bytesArray.length());
-            }
+            consumer.accept(new BytesArray(outputStream.toByteArray()));
         }
     }
 
-    /**
-     * Returns true if the blob is a leftover temporary blob.
-     *
-     * The temporary blobs might be left after failed atomic write operation.
-     */
-    public boolean isTempBlobName(String blobName) {
-        return blobName.startsWith(ChecksumBlobStoreFormat.TEMP_FILE_PREFIX);
-    }
-
     protected BytesReference write(T obj) throws IOException {
         try (BytesStreamOutput bytesStreamOutput = new BytesStreamOutput()) {
             if (compress) {
@@ -222,10 +197,4 @@ public class ChecksumBlobStoreFormat<T extends ToXContent> extends BlobStoreForm
             builder.endObject();
         }
     }
-
-    protected String tempBlobName(String name) {
-        return TEMP_FILE_PREFIX + String.format(Locale.ROOT, blobNameFormat, name);
-    }
-
 }
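The net effect of the ChecksumBlobStoreFormat refactor is one serialization path with two terminal writes: write and writeAtomic both call writeTo, which builds the checksummed byte block once and hands it to a CheckedConsumer. Stripped of the Lucene codec details, the shape is roughly as follows — Payload and serialize(...) are illustrative stand-ins for the real generics and codec logic:

```java
// Illustrative shape of the refactor; Payload and serialize(...) stand in for
// the real generic type and the codec/checksum logic shown in the hunks above.
private void writeTo(Payload obj, String blobName,
                     CheckedConsumer<BytesArray, IOException> consumer) throws IOException {
    BytesArray bytesArray = serialize(obj, blobName); // header + content + checksum footer
    consumer.accept(bytesArray);                      // plain or atomic write, caller's choice
}

public void write(Payload obj, BlobContainer container, String name) throws IOException {
    final String blobName = blobName(name);
    writeTo(obj, blobName, bytes -> {
        try (InputStream stream = bytes.streamInput()) {
            container.writeBlob(blobName, stream, bytes.length()); // writeBlobAtomic in writeAtomic
        }
    });
}
```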
@@ -39,6 +39,7 @@ import java.text.DecimalFormatSymbols;
 import java.text.NumberFormat;
 import java.text.ParseException;
 import java.util.Arrays;
+import java.util.Base64;
 import java.util.Locale;
 import java.util.Objects;
 import java.util.function.LongSupplier;
@@ -121,6 +122,50 @@ public interface DocValueFormat extends NamedWriteable {
         }
     };
 
+    DocValueFormat BINARY = new DocValueFormat() {
+
+        @Override
+        public String getWriteableName() {
+            return "binary";
+        }
+
+        @Override
+        public void writeTo(StreamOutput out) throws IOException {
+        }
+
+        @Override
+        public Object format(long value) {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public Object format(double value) {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public String format(BytesRef value) {
+            return Base64.getEncoder()
+                    .withoutPadding()
+                    .encodeToString(Arrays.copyOfRange(value.bytes, value.offset, value.offset + value.length));
+        }
+
+        @Override
+        public long parseLong(String value, boolean roundUp, LongSupplier now) {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public double parseDouble(String value, boolean roundUp, LongSupplier now) {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public BytesRef parseBytesRef(String value) {
+            return new BytesRef(Base64.getDecoder().decode(value));
+        }
+    };
+
     final class DateTime implements DocValueFormat {
 
         public static final String NAME = "date_time";
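The BINARY format is unpadded base64 over the raw bytes, with parseBytesRef as its inverse (java.util's basic decoder accepts unpadded input). A standalone round trip matching the values in the new testBinaryFormat test further down:

```java
import java.util.Base64;

public final class BinaryDocValueRoundTrip {
    public static void main(String[] args) {
        byte[] raw = {42, 100};
        // encode without padding, as DocValueFormat.BINARY.format(BytesRef) does
        String encoded = Base64.getEncoder().withoutPadding().encodeToString(raw);
        System.out.println(encoded); // prints "KmQ"
        // the basic decoder tolerates the missing padding, mirroring parseBytesRef
        byte[] decoded = Base64.getDecoder().decode(encoded);
        System.out.println(decoded[0] + "," + decoded[1]); // prints "42,100"
    }
}
```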
@@ -645,6 +645,7 @@ public class SearchModule {
         registerValueFormat(DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH);
         registerValueFormat(DocValueFormat.IP.getWriteableName(), in -> DocValueFormat.IP);
         registerValueFormat(DocValueFormat.RAW.getWriteableName(), in -> DocValueFormat.RAW);
+        registerValueFormat(DocValueFormat.BINARY.getWriteableName(), in -> DocValueFormat.BINARY);
     }
 
     /**
@@ -102,6 +102,6 @@ public class ClusterUpdateSettingsResponseTests extends AbstractStreamableXConte
 
     public void testOldSerialisation() throws IOException {
         ClusterUpdateSettingsResponse original = createTestInstance();
-        assertSerialization(original, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_7_0_0_alpha1));
+        assertSerialization(original, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_4_0));
     }
 }
@@ -19,6 +19,8 @@
 
 package org.elasticsearch.action.admin.indices.rollover;
 
+import com.carrotsearch.randomizedtesting.annotations.Repeat;
+
 import org.elasticsearch.Version;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentParser;
@@ -132,6 +134,6 @@ public class RolloverResponseTests extends AbstractStreamableXContentTestCase<Ro
 
     public void testOldSerialisation() throws IOException {
         RolloverResponse original = createTestInstance();
-        assertSerialization(original, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_7_0_0_alpha1));
+        assertSerialization(original, VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, Version.V_6_4_0));
     }
 }
@@ -0,0 +1,40 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.blobstore.fs;
+
+import org.elasticsearch.test.ESTestCase;
+
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.startsWith;
+
+public class FsBlobContainerTests extends ESTestCase {
+
+    public void testTempBlobName() {
+        final String blobName = randomAlphaOfLengthBetween(1, 20);
+        final String tempBlobName = FsBlobContainer.tempBlobName(blobName);
+        assertThat(tempBlobName, startsWith("pending-"));
+        assertThat(tempBlobName, containsString(blobName));
+    }
+
+    public void testIsTempBlobName() {
+        final String tempBlobName = FsBlobContainer.tempBlobName(randomAlphaOfLengthBetween(1, 20));
+        assertThat(FsBlobContainer.isTempBlobName(tempBlobName), is(true));
+    }
+}
@@ -16,23 +16,27 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.common.blobstore;
+package org.elasticsearch.common.blobstore.fs;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.elasticsearch.common.blobstore.fs.FsBlobStore;
+import org.elasticsearch.common.blobstore.BlobStore;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.repositories.ESBlobStoreContainerTestCase;
 
 import java.io.IOException;
-import java.nio.file.Path;
 
 @LuceneTestCase.SuppressFileSystems("ExtrasFS")
 public class FsBlobStoreContainerTests extends ESBlobStoreContainerTestCase {
 
     protected BlobStore newBlobStore() throws IOException {
-        Path tempDir = createTempDir();
-        Settings settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build();
-        return new FsBlobStore(settings, tempDir);
+        final Settings settings;
+        if (randomBoolean()) {
+            settings = Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build();
+        } else {
+            settings = Settings.EMPTY;
+        }
+        return new FsBlobStore(settings, createTempDir());
     }
 }
@@ -16,10 +16,12 @@
  * specific language governing permissions and limitations
  * under the License.
  */
-package org.elasticsearch.common.blobstore;
+package org.elasticsearch.common.blobstore.fs;
 
 import org.apache.lucene.util.LuceneTestCase;
-import org.elasticsearch.common.blobstore.fs.FsBlobStore;
+import org.elasticsearch.common.blobstore.BlobContainer;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
@@ -32,10 +34,15 @@ import java.nio.file.Path;
 
 @LuceneTestCase.SuppressFileSystems("ExtrasFS")
 public class FsBlobStoreTests extends ESBlobStoreTestCase {
 
     protected BlobStore newBlobStore() throws IOException {
-        Path tempDir = createTempDir();
-        Settings settings = randomBoolean() ? Settings.EMPTY : Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build();
-        return new FsBlobStore(settings, tempDir);
+        final Settings settings;
+        if (randomBoolean()) {
+            settings = Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build();
+        } else {
+            settings = Settings.EMPTY;
+        }
+        return new FsBlobStore(settings, createTempDir());
     }
 
     public void testReadOnly() throws Exception {
@@ -46,16 +46,13 @@ import org.elasticsearch.index.shard.IndexShardTestCase;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.test.ESIntegTestCase;
-import org.elasticsearch.test.junit.annotations.TestLogging;
 
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
-import java.util.Locale;
 import java.util.Map;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.stream.Collectors;
@@ -103,7 +100,7 @@ public class FlushIT extends ESIntegTestCase {
         }
     }
 
-    public void testSyncedFlush() throws ExecutionException, InterruptedException, IOException {
+    public void testSyncedFlush() throws Exception {
         internalCluster().ensureAtLeastNumDataNodes(2);
         prepareCreate("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)).get();
         ensureGreen();
@@ -246,16 +243,6 @@ public class FlushIT extends ESIntegTestCase {
         assertThat(indexResult.getFailure(), nullValue());
     }
 
-    private String syncedFlushDescription(ShardsSyncedFlushResult result) {
-        String detail = result.shardResponses().entrySet().stream()
-            .map(e -> "Shard [" + e.getKey() + "], result [" + e.getValue() + "]")
-            .collect(Collectors.joining(","));
-        return String.format(Locale.ROOT, "Total shards: [%d], failed: [%s], reason: [%s], detail: [%s]",
-            result.totalShards(), result.failed(), result.failureReason(), detail);
-    }
-
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29392")
-    @TestLogging("_root:DEBUG,org.elasticsearch.indices.flush:TRACE")
     public void testSyncedFlushSkipOutOfSyncReplicas() throws Exception {
         internalCluster().ensureAtLeastNumDataNodes(between(2, 3));
         final int numberOfReplicas = internalCluster().numDataNodes() - 1;
@@ -281,7 +268,6 @@ public class FlushIT extends ESIntegTestCase {
             indexDoc(IndexShardTestCase.getEngine(outOfSyncReplica), "extra_" + i);
         }
         final ShardsSyncedFlushResult partialResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
-        logger.info("Partial seal: {}", syncedFlushDescription(partialResult));
         assertThat(partialResult.totalShards(), equalTo(numberOfReplicas + 1));
         assertThat(partialResult.successfulShards(), equalTo(numberOfReplicas));
         assertThat(partialResult.shardResponses().get(outOfSyncReplica.routingEntry()).failureReason, equalTo(
@@ -297,8 +283,6 @@ public class FlushIT extends ESIntegTestCase {
         assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1));
     }
 
-    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/29392")
-    @TestLogging("_root:DEBUG,org.elasticsearch.indices.flush:TRACE")
     public void testDoNotRenewSyncedFlushWhenAllSealed() throws Exception {
         internalCluster().ensureAtLeastNumDataNodes(between(2, 3));
         final int numberOfReplicas = internalCluster().numDataNodes() - 1;
@@ -315,11 +299,9 @@ public class FlushIT extends ESIntegTestCase {
             index("test", "doc", Integer.toString(i));
         }
         final ShardsSyncedFlushResult firstSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
-        logger.info("First seal: {}", syncedFlushDescription(firstSeal));
         assertThat(firstSeal.successfulShards(), equalTo(numberOfReplicas + 1));
         // Do not renew synced-flush
         final ShardsSyncedFlushResult secondSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
-        logger.info("Second seal: {}", syncedFlushDescription(secondSeal));
         assertThat(secondSeal.successfulShards(), equalTo(numberOfReplicas + 1));
         assertThat(secondSeal.syncId(), equalTo(firstSeal.syncId()));
         // Shards were updated, renew synced flush.
@@ -328,7 +310,6 @@ public class FlushIT extends ESIntegTestCase {
             index("test", "doc", Integer.toString(i));
         }
         final ShardsSyncedFlushResult thirdSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
-        logger.info("Third seal: {}", syncedFlushDescription(thirdSeal));
         assertThat(thirdSeal.successfulShards(), equalTo(numberOfReplicas + 1));
         assertThat(thirdSeal.syncId(), not(equalTo(firstSeal.syncId())));
         // Manually remove or change sync-id, renew synced flush.
@@ -344,7 +325,6 @@ public class FlushIT extends ESIntegTestCase {
             assertThat(shard.commitStats().syncId(), nullValue());
         }
         final ShardsSyncedFlushResult forthSeal = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
-        logger.info("Forth seal: {}", syncedFlushDescription(forthSeal));
         assertThat(forthSeal.successfulShards(), equalTo(numberOfReplicas + 1));
         assertThat(forthSeal.syncId(), not(equalTo(thirdSeal.syncId())));
     }
@@ -29,6 +29,9 @@ import org.elasticsearch.test.InternalTestCluster;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.elasticsearch.test.ESTestCase.assertBusy;
 
 /** Utils for SyncedFlush */
 public class SyncedFlushUtil {
@@ -40,21 +43,31 @@ public class SyncedFlushUtil {
     /**
      * Blocking version of {@link SyncedFlushService#attemptSyncedFlush(ShardId, ActionListener)}
      */
-    public static ShardsSyncedFlushResult attemptSyncedFlush(Logger logger, InternalTestCluster cluster, ShardId shardId) {
+    public static ShardsSyncedFlushResult attemptSyncedFlush(Logger logger, InternalTestCluster cluster, ShardId shardId) throws Exception {
+        /*
+         * When the last indexing operation is completed, we will fire a global checkpoint sync.
+         * Since a global checkpoint sync request is a replication request, it will acquire an index
+         * shard permit on the primary when executing. If this happens at the same time while we are
+         * issuing the synced-flush, the synced-flush request will fail as it thinks there are
+         * in-flight operations. We can avoid such situation by continuing issuing another synced-flush
+         * if the synced-flush failed due to the ongoing operations on the primary.
+         */
         SyncedFlushService service = cluster.getInstance(SyncedFlushService.class);
-        logger.debug("Issue synced-flush on node [{}], shard [{}], cluster state [{}]",
-            service.nodeName(), shardId, cluster.clusterService(service.nodeName()).state());
-        LatchedListener<ShardsSyncedFlushResult> listener = new LatchedListener<>();
-        service.attemptSyncedFlush(shardId, listener);
-        try {
+        AtomicReference<LatchedListener<ShardsSyncedFlushResult>> listenerHolder = new AtomicReference<>();
+        assertBusy(() -> {
+            LatchedListener<ShardsSyncedFlushResult> listener = new LatchedListener<>();
+            listenerHolder.set(listener);
+            service.attemptSyncedFlush(shardId, listener);
             listener.latch.await();
-        } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
+            if (listener.result != null && listener.result.failureReason() != null
+                && listener.result.failureReason().contains("ongoing operations on primary")) {
+                throw new AssertionError(listener.result.failureReason()); // cause the assert busy to retry
+            }
+        });
+        if (listenerHolder.get().error != null) {
+            throw ExceptionsHelper.convertToElastic(listenerHolder.get().error);
         }
-        if (listener.error != null) {
-            throw ExceptionsHelper.convertToElastic(listener.error);
-        }
-        return listener.result;
+        return listenerHolder.get().result;
     }
 
     public static final class LatchedListener<T> implements ActionListener<T> {
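The rewritten helper relies on ESTestCase#assertBusy, which re-runs a block, with backoff, until it stops throwing AssertionError or a timeout expires; throwing inside the lambda is therefore the retry signal. The bare idiom looks like this — attemptOnce and the failure predicate are illustrative, not real API:

```java
// Sketch of the assertBusy retry idiom: the lambda throws AssertionError to
// request another attempt; attemptOnce() and Result are hypothetical names.
assertBusy(() -> {
    Result result = attemptOnce();                        // one synced-flush attempt
    if (result.failureReason() != null
            && result.failureReason().contains("ongoing operations on primary")) {
        throw new AssertionError(result.failureReason()); // transient condition: retry
    }
});
```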
@@ -44,6 +44,7 @@ public class DocValueFormatTests extends ESTestCase {
         entries.add(new Entry(DocValueFormat.class, DocValueFormat.GEOHASH.getWriteableName(), in -> DocValueFormat.GEOHASH));
         entries.add(new Entry(DocValueFormat.class, DocValueFormat.IP.getWriteableName(), in -> DocValueFormat.IP));
         entries.add(new Entry(DocValueFormat.class, DocValueFormat.RAW.getWriteableName(), in -> DocValueFormat.RAW));
+        entries.add(new Entry(DocValueFormat.class, DocValueFormat.BINARY.getWriteableName(), in -> DocValueFormat.BINARY));
         NamedWriteableRegistry registry = new NamedWriteableRegistry(entries);
 
         BytesStreamOutput out = new BytesStreamOutput();
@@ -82,6 +83,11 @@ public class DocValueFormatTests extends ESTestCase {
         out.writeNamedWriteable(DocValueFormat.RAW);
         in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry);
         assertSame(DocValueFormat.RAW, in.readNamedWriteable(DocValueFormat.class));
+
+        out = new BytesStreamOutput();
+        out.writeNamedWriteable(DocValueFormat.BINARY);
+        in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry);
+        assertSame(DocValueFormat.BINARY, in.readNamedWriteable(DocValueFormat.class));
     }
 
     public void testRawFormat() {
@@ -96,6 +102,14 @@ public class DocValueFormatTests extends ESTestCase {
         assertEquals("abc", DocValueFormat.RAW.format(new BytesRef("abc")));
     }
 
+    public void testBinaryFormat() {
+        assertEquals("", DocValueFormat.BINARY.format(new BytesRef()));
+        assertEquals("KmQ", DocValueFormat.BINARY.format(new BytesRef(new byte[] {42, 100})));
+
+        assertEquals(new BytesRef(), DocValueFormat.BINARY.parseBytesRef(""));
+        assertEquals(new BytesRef(new byte[] {42, 100}), DocValueFormat.BINARY.parseBytesRef("KmQ"));
+    }
+
     public void testBooleanFormat() {
         assertEquals(false, DocValueFormat.BOOLEAN.format(0));
         assertEquals(true, DocValueFormat.BOOLEAN.format(1));
@@ -19,6 +19,7 @@

 package org.elasticsearch.search.fields;

+import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.action.index.IndexRequestBuilder;
 import org.elasticsearch.action.search.SearchRequestBuilder;
 import org.elasticsearch.action.search.SearchResponse;

@@ -700,7 +701,7 @@ public class SearchFieldsIT extends ESIntegTestCase {
         assertThat(fields.get("test_field").getValue(), equalTo("foobar"));
     }

-    public void testFieldsPulledFromFieldData() throws Exception {
+    public void testDocValueFields() throws Exception {
         createIndex("test");

         String mapping = Strings

@@ -744,6 +745,7 @@ public class SearchFieldsIT extends ESIntegTestCase {
                 .endObject()
                 .startObject("binary_field")
                     .field("type", "binary")
+                    .field("doc_values", true) // off by default on binary fields
                 .endObject()
                 .startObject("ip_field")
                     .field("type", "ip")

@@ -766,6 +768,7 @@ public class SearchFieldsIT extends ESIntegTestCase {
                 .field("double_field", 6.0d)
                 .field("date_field", Joda.forPattern("dateOptionalTime").printer().print(date))
                 .field("boolean_field", true)
+                .field("binary_field", new byte[] {42, 100})
                 .field("ip_field", "::1")
                 .endObject()).execute().actionGet();

@@ -782,6 +785,7 @@ public class SearchFieldsIT extends ESIntegTestCase {
                 .addDocValueField("double_field")
                 .addDocValueField("date_field")
                 .addDocValueField("boolean_field")
+                .addDocValueField("binary_field")
                 .addDocValueField("ip_field");
         SearchResponse searchResponse = builder.execute().actionGet();

@@ -790,7 +794,7 @@ public class SearchFieldsIT extends ESIntegTestCase {
         Set<String> fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet());
         assertThat(fields, equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field",
                 "float_field", "double_field", "date_field", "boolean_field", "text_field", "keyword_field",
-                "ip_field")));
+                "binary_field", "ip_field")));

         assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue().toString(), equalTo("1"));
         assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue().toString(), equalTo("2"));

@@ -802,6 +806,8 @@ public class SearchFieldsIT extends ESIntegTestCase {
         assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true));
         assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo"));
         assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo"));
+        assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(),
+                equalTo(new BytesRef(new byte[] {42, 100})));
         assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1"));

         builder = client().prepareSearch().setQuery(matchAllQuery())

@@ -815,6 +821,7 @@ public class SearchFieldsIT extends ESIntegTestCase {
                 .addDocValueField("double_field", "use_field_mapping")
                 .addDocValueField("date_field", "use_field_mapping")
                 .addDocValueField("boolean_field", "use_field_mapping")
+                .addDocValueField("binary_field", "use_field_mapping")
                 .addDocValueField("ip_field", "use_field_mapping");
         searchResponse = builder.execute().actionGet();

@@ -823,7 +830,7 @@ public class SearchFieldsIT extends ESIntegTestCase {
         fields = new HashSet<>(searchResponse.getHits().getAt(0).getFields().keySet());
         assertThat(fields, equalTo(newHashSet("byte_field", "short_field", "integer_field", "long_field",
                 "float_field", "double_field", "date_field", "boolean_field", "text_field", "keyword_field",
-                "ip_field")));
+                "binary_field", "ip_field")));

         assertThat(searchResponse.getHits().getAt(0).getFields().get("byte_field").getValue().toString(), equalTo("1"));
         assertThat(searchResponse.getHits().getAt(0).getFields().get("short_field").getValue().toString(), equalTo("2"));

@@ -836,6 +843,7 @@ public class SearchFieldsIT extends ESIntegTestCase {
         assertThat(searchResponse.getHits().getAt(0).getFields().get("boolean_field").getValue(), equalTo((Object) true));
         assertThat(searchResponse.getHits().getAt(0).getFields().get("text_field").getValue(), equalTo("foo"));
         assertThat(searchResponse.getHits().getAt(0).getFields().get("keyword_field").getValue(), equalTo("foo"));
+        assertThat(searchResponse.getHits().getAt(0).getFields().get("binary_field").getValue(), equalTo("KmQ"));
         assertThat(searchResponse.getHits().getAt(0).getFields().get("ip_field").getValue(), equalTo("::1"));

         builder = client().prepareSearch().setQuery(matchAllQuery())
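The test pins down a deliberate asymmetry: requested without a format, a binary doc value comes back as the raw BytesRef({42, 100}), while the use_field_mapping format renders it through the field mapper as the Base64 string "KmQ". A sketch of the two request shapes, reusing the builder calls and names from the test above:

    // Raw doc values: "binary_field" arrives as BytesRef({42, 100})
    SearchRequestBuilder raw = client().prepareSearch("test")
            .setQuery(matchAllQuery())
            .addDocValueField("binary_field");

    // Mapper-formatted doc values: "binary_field" arrives as the string "KmQ"
    SearchRequestBuilder formatted = client().prepareSearch("test")
            .setQuery(matchAllQuery())
            .addDocValueField("binary_field", "use_field_mapping");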
@@ -100,7 +100,7 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase {
             }
             Thread.sleep(100);
         }
-        fail("Timeout!!!");
+        fail("Timeout waiting for node [" + node + "] to be blocked");
     }

     public SnapshotInfo waitForCompletion(String repository, String snapshotName, TimeValue timeout) throws InterruptedException {
@@ -224,52 +224,16 @@ public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase {
             IOException writeBlobException = expectThrows(IOException.class, () -> {
                 BlobContainer wrapper = new BlobContainerWrapper(blobContainer) {
                     @Override
-                    public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
-                        throw new IOException("Exception thrown in writeBlob() for " + blobName);
+                    public void writeBlobAtomic(String blobName, InputStream inputStream, long blobSize) throws IOException {
+                        throw new IOException("Exception thrown in writeBlobAtomic() for " + blobName);
                     }
                 };
                 checksumFormat.writeAtomic(blobObj, wrapper, name);
             });

-            assertEquals("Exception thrown in writeBlob() for pending-" + name, writeBlobException.getMessage());
+            assertEquals("Exception thrown in writeBlobAtomic() for " + name, writeBlobException.getMessage());
             assertEquals(0, writeBlobException.getSuppressed().length);
         }
-        {
-            IOException moveException = expectThrows(IOException.class, () -> {
-                BlobContainer wrapper = new BlobContainerWrapper(blobContainer) {
-                    @Override
-                    public void move(String sourceBlobName, String targetBlobName) throws IOException {
-                        throw new IOException("Exception thrown in move() for " + sourceBlobName);
-                    }
-                };
-                checksumFormat.writeAtomic(blobObj, wrapper, name);
-            });
-            assertEquals("Exception thrown in move() for pending-" + name, moveException.getMessage());
-            assertEquals(0, moveException.getSuppressed().length);
-        }
-        {
-            IOException moveThenDeleteException = expectThrows(IOException.class, () -> {
-                BlobContainer wrapper = new BlobContainerWrapper(blobContainer) {
-                    @Override
-                    public void move(String sourceBlobName, String targetBlobName) throws IOException {
-                        throw new IOException("Exception thrown in move() for " + sourceBlobName);
-                    }
-
-                    @Override
-                    public void deleteBlob(String blobName) throws IOException {
-                        throw new IOException("Exception thrown in deleteBlob() for " + blobName);
-                    }
-                };
-                checksumFormat.writeAtomic(blobObj, wrapper, name);
-            });
-
-            assertEquals("Exception thrown in move() for pending-" + name, moveThenDeleteException.getMessage());
-            assertEquals(1, moveThenDeleteException.getSuppressed().length);
-
-            final Throwable suppressedThrowable = moveThenDeleteException.getSuppressed()[0];
-            assertTrue(suppressedThrowable instanceof IOException);
-            assertEquals("Exception thrown in deleteBlob() for pending-" + name, suppressedThrowable.getMessage());
-        }
     }

     protected BlobStore createTestBlobStore() throws IOException {
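With #30902, BlobContainer.writeBlobAtomic() replaces the old writeBlob()-then-move() sequence, which is why the move() and deleteBlob() failure cases could be deleted above: there is no longer a separate rename step for them to interrupt. A sketch of the write-then-rename pattern a filesystem implementation can use to honour the atomic contract (illustrative only; the "pending-" prefix echoes the old assertion messages, not the real tempBlobName() output):

    import java.io.IOException;
    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    final class AtomicWriteSketch {
        static void writeBlobAtomic(Path dir, String blobName, InputStream in) throws IOException {
            Path temp = dir.resolve("pending-" + blobName);
            Files.copy(in, temp);                   // 1. stage the full blob under a temp name
            Files.move(temp, dir.resolve(blobName), // 2. publish it with a single atomic rename
                    StandardCopyOption.ATOMIC_MOVE);
        }
    }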
@@ -53,11 +53,21 @@ public class BlobContainerWrapper implements BlobContainer {
         delegate.writeBlob(blobName, inputStream, blobSize);
     }

+    @Override
+    public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException {
+        delegate.writeBlobAtomic(blobName, inputStream, blobSize);
+    }
+
     @Override
     public void deleteBlob(String blobName) throws IOException {
         delegate.deleteBlob(blobName);
     }

+    @Override
+    public void deleteBlobIgnoringIfNotExists(final String blobName) throws IOException {
+        delegate.deleteBlobIgnoringIfNotExists(blobName);
+    }
+
     @Override
     public Map<String, BlobMetaData> listBlobs() throws IOException {
         return delegate.listBlobs();
@@ -19,6 +19,28 @@

 package org.elasticsearch.snapshots.mockstore;

+import com.carrotsearch.randomizedtesting.RandomizedContext;
+import org.apache.lucene.index.CorruptIndexException;
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.metadata.RepositoryMetaData;
+import org.elasticsearch.common.blobstore.BlobContainer;
+import org.elasticsearch.common.blobstore.BlobMetaData;
+import org.elasticsearch.common.blobstore.BlobPath;
+import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.blobstore.fs.FsBlobContainer;
+import org.elasticsearch.common.io.PathUtils;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Setting.Property;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.NamedXContentRegistry;
+import org.elasticsearch.env.Environment;
+import org.elasticsearch.plugins.RepositoryPlugin;
+import org.elasticsearch.repositories.IndexId;
+import org.elasticsearch.repositories.Repository;
+import org.elasticsearch.repositories.fs.FsRepository;
+import org.elasticsearch.snapshots.SnapshotId;
+
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.UnsupportedEncodingException;

@@ -29,31 +51,11 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
 import java.util.Map;
+import java.util.Random;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicLong;

-import com.carrotsearch.randomizedtesting.RandomizedContext;
-import org.apache.lucene.index.CorruptIndexException;
-import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.metadata.RepositoryMetaData;
-import org.elasticsearch.common.blobstore.BlobContainer;
-import org.elasticsearch.common.blobstore.BlobMetaData;
-import org.elasticsearch.common.blobstore.BlobPath;
-import org.elasticsearch.common.blobstore.BlobStore;
-import org.elasticsearch.common.io.PathUtils;
-import org.elasticsearch.common.settings.Setting;
-import org.elasticsearch.common.settings.Setting.Property;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.xcontent.NamedXContentRegistry;
-import org.elasticsearch.env.Environment;
-import org.elasticsearch.plugins.RepositoryPlugin;
-import org.elasticsearch.repositories.Repository;
-import org.elasticsearch.repositories.IndexId;
-import org.elasticsearch.repositories.fs.FsRepository;
-import org.elasticsearch.snapshots.SnapshotId;
-
 public class MockRepository extends FsRepository {

     public static class Plugin extends org.elasticsearch.plugins.Plugin implements RepositoryPlugin {

@@ -325,6 +327,12 @@ public class MockRepository extends FsRepository {
             super.deleteBlob(blobName);
         }

+        @Override
+        public void deleteBlobIgnoringIfNotExists(String blobName) throws IOException {
+            maybeIOExceptionOrBlock(blobName);
+            super.deleteBlobIgnoringIfNotExists(blobName);
+        }
+
         @Override
         public Map<String, BlobMetaData> listBlobs() throws IOException {
             maybeIOExceptionOrBlock("");

@@ -365,6 +373,31 @@ public class MockRepository extends FsRepository {
                 maybeIOExceptionOrBlock(blobName);
             }
         }

+        @Override
+        public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException {
+            final Random random = RandomizedContext.current().getRandom();
+            if (random.nextBoolean()) {
+                if ((delegate() instanceof FsBlobContainer) && (random.nextBoolean())) {
+                    // Simulate a failure between the write and move operation in FsBlobContainer
+                    final String tempBlobName = FsBlobContainer.tempBlobName(blobName);
+                    super.writeBlob(tempBlobName, inputStream, blobSize);
+                    maybeIOExceptionOrBlock(blobName);
+                    final FsBlobContainer fsBlobContainer = (FsBlobContainer) delegate();
+                    fsBlobContainer.move(tempBlobName, blobName);
+                } else {
+                    // Atomic write since it is potentially supported
+                    // by the delegating blob container
+                    maybeIOExceptionOrBlock(blobName);
+                    super.writeBlobAtomic(blobName, inputStream, blobSize);
+                }
+            } else {
+                // Simulate a non-atomic write since many blob container
+                // implementations do not support atomic write
+                maybeIOExceptionOrBlock(blobName);
+                super.writeBlob(blobName, inputStream, blobSize);
+            }
+        }
     }
 }
 }
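The override randomizes between three behaviours: a genuinely atomic write via the delegate, a plain non-atomic writeBlob() (many containers have no atomic primitive), and, for FsBlobContainer delegates only, a write that may fail between staging the temp file and renaming it, which exercises exactly the crash window the atomic contract is meant to close.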
@@ -158,7 +158,11 @@ public abstract class ESBlobStoreContainerTestCase extends ESTestCase {

     protected void writeBlob(final BlobContainer container, final String blobName, final BytesArray bytesArray) throws IOException {
         try (InputStream stream = bytesArray.streamInput()) {
-            container.writeBlob(blobName, stream, bytesArray.length());
+            if (randomBoolean()) {
+                container.writeBlob(blobName, stream, bytesArray.length());
+            } else {
+                container.writeBlobAtomic(blobName, stream, bytesArray.length());
+            }
         }
     }
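Since every blob-store container test funnels writes through this helper, the randomBoolean() split makes each run of every suite exercise both writeBlob() and the new writeBlobAtomic() against each BlobContainer implementation, with no per-suite changes.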
@@ -411,7 +411,8 @@ public class LicenseService extends AbstractLifecycleComponent implements Cluste
             // auto-generate license if no licenses ever existed or if the current license is basic and
             // needs extended or if the license signature needs to be updated. this will trigger a subsequent cluster changed event
             if (currentClusterState.getNodes().isLocalNodeElectedMaster() &&
-                    (noLicense || LicenseUtils.licenseNeedsExtended(currentLicense) || LicenseUtils.signatureNeedsUpdate(currentLicense))) {
+                    (noLicense || LicenseUtils.licenseNeedsExtended(currentLicense) ||
+                            LicenseUtils.signatureNeedsUpdate(currentLicense, currentClusterState.nodes()))) {
                 registerOrUpdateSelfGeneratedLicense();
             }
         } else if (logger.isDebugEnabled()) {
@@ -6,8 +6,12 @@
 package org.elasticsearch.license;

 import org.elasticsearch.ElasticsearchSecurityException;
+import org.elasticsearch.Version;
+import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.rest.RestStatus;

+import java.util.stream.StreamSupport;
+
 public class LicenseUtils {

     public static final String EXPIRED_FEATURE_METADATA = "es.license.expired.feature";

@@ -42,8 +46,25 @@ public class LicenseUtils {
      * Checks if the signature of a self generated license with older version needs to be
      * recreated with the new key
      */
-    public static boolean signatureNeedsUpdate(License license) {
+    public static boolean signatureNeedsUpdate(License license, DiscoveryNodes currentNodes) {
+        assert License.VERSION_CRYPTO_ALGORITHMS == License.VERSION_CURRENT : "update this method when adding a new version";
+
         return ("basic".equals(license.type()) || "trial".equals(license.type())) &&
-                (license.version() < License.VERSION_CRYPTO_ALGORITHMS);
+                // only upgrade signature when all nodes are ready to deserialize the new signature
+                (license.version() < License.VERSION_CRYPTO_ALGORITHMS &&
+                compatibleLicenseVersion(currentNodes) == License.VERSION_CRYPTO_ALGORITHMS
+        );
+    }
+
+    public static int compatibleLicenseVersion(DiscoveryNodes currentNodes) {
+        assert License.VERSION_CRYPTO_ALGORITHMS == License.VERSION_CURRENT : "update this method when adding a new version";
+
+        if (StreamSupport.stream(currentNodes.spliterator(), false)
+                .allMatch(node -> node.getVersion().onOrAfter(Version.V_6_4_0))) {
+            // License.VERSION_CRYPTO_ALGORITHMS was introduced in 6.4.0
+            return License.VERSION_CRYPTO_ALGORITHMS;
+        } else {
+            return License.VERSION_START_DATE;
+        }
     }
 }
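The rule compatibleLicenseVersion() encodes is that a single pre-6.4.0 node anywhere in the cluster pins self-generated licenses to the older signature format every node can read. A condensed sketch of the same decision over a plain list of versions (hypothetical helper for illustration; the License and Version constants are the ones used in the hunk above):

    static int compatibleLicenseVersion(List<Version> nodeVersions) {
        boolean allUpgraded = nodeVersions.stream()
                .allMatch(v -> v.onOrAfter(Version.V_6_4_0));
        // One 6.3.x node is enough to force the universally readable format
        return allUpgraded ? License.VERSION_CRYPTO_ALGORITHMS : License.VERSION_START_DATE;
    }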
@@ -5,6 +5,7 @@
  */
 package org.elasticsearch.license;

+import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;

@@ -26,8 +27,8 @@ import static org.elasticsearch.license.CryptUtils.decrypt;

 class SelfGeneratedLicense {

-    public static License create(License.Builder specBuilder) {
-        return create(specBuilder, License.VERSION_CURRENT);
+    public static License create(License.Builder specBuilder, DiscoveryNodes currentNodes) {
+        return create(specBuilder, LicenseUtils.compatibleLicenseVersion(currentNodes));
     }

     public static License create(License.Builder specBuilder, int version) {
@@ -73,7 +73,7 @@ public class StartBasicClusterTask extends ClusterStateUpdateTask {
                 .issueDate(issueDate)
                 .type("basic")
                 .expiryDate(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS);
-        License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder);
+        License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder, currentState.nodes());
         if (request.isAcknowledged() == false && currentLicense != null) {
             Map<String, String[]> ackMessages = LicenseService.getAckMessages(selfGeneratedLicense, currentLicense);
             if (ackMessages.isEmpty() == false) {
@@ -82,7 +82,7 @@ public class StartTrialClusterTask extends ClusterStateUpdateTask {
                 .issueDate(issueDate)
                 .type(request.getType())
                 .expiryDate(expiryDate);
-        License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder);
+        License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder, currentState.nodes());
         LicensesMetaData newLicensesMetaData = new LicensesMetaData(selfGeneratedLicense, Version.CURRENT);
         mdBuilder.putCustom(LicensesMetaData.TYPE, newLicensesMetaData);
         return ClusterState.builder(currentState).metaData(mdBuilder).build();
@@ -61,7 +61,7 @@ public class StartupSelfGeneratedLicenseTask extends ClusterStateUpdateTask {
                     "]. Must be trial or basic.");
             }
             return updateWithLicense(currentState, type);
-        } else if (LicenseUtils.signatureNeedsUpdate(currentLicensesMetaData.getLicense())) {
+        } else if (LicenseUtils.signatureNeedsUpdate(currentLicensesMetaData.getLicense(), currentState.nodes())) {
             return updateLicenseSignature(currentState, currentLicensesMetaData);
         } else if (LicenseUtils.licenseNeedsExtended(currentLicensesMetaData.getLicense())) {
             return extendBasic(currentState, currentLicensesMetaData);

@@ -87,7 +87,7 @@ public class StartupSelfGeneratedLicenseTask extends ClusterStateUpdateTask {
                 .issueDate(issueDate)
                 .type(type)
                 .expiryDate(expiryDate);
-        License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder);
+        License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder, currentState.nodes());
         Version trialVersion = currentLicenseMetaData.getMostRecentTrialVersion();
         LicensesMetaData newLicenseMetadata = new LicensesMetaData(selfGeneratedLicense, trialVersion);
         mdBuilder.putCustom(LicensesMetaData.TYPE, newLicenseMetadata);

@@ -120,7 +120,7 @@ public class StartupSelfGeneratedLicenseTask extends ClusterStateUpdateTask {
                 .issueDate(currentLicense.issueDate())
                 .type("basic")
                 .expiryDate(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS);
-        License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder);
+        License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder, currentLicense.version());
         Version trialVersion = currentLicenseMetadata.getMostRecentTrialVersion();
         return new LicensesMetaData(selfGeneratedLicense, trialVersion);
     }

@@ -141,7 +141,7 @@ public class StartupSelfGeneratedLicenseTask extends ClusterStateUpdateTask {
                 .issueDate(issueDate)
                 .type(type)
                 .expiryDate(expiryDate);
-        License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder);
+        License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder, currentState.nodes());
         LicensesMetaData licensesMetaData;
         if ("trial".equals(type)) {
             licensesMetaData = new LicensesMetaData(selfGeneratedLicense, Version.CURRENT);
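One call site above is deliberately different: when an expired basic license is merely extended (the @@ -120,7 hunk), the replacement is created with currentLicense.version() rather than a cluster-derived version, which suggests an extension alone never bumps the signature format; that upgrade still goes through the separate signatureNeedsUpdate() path.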
@@ -104,7 +104,7 @@ public class LicenseRegistrationTests extends AbstractLicenseServiceTestCase {
                 .issueDate(dateMath("now-10h", now))
                 .type("basic")
                 .expiryDate(dateMath("now-2h", now));
-        License license = SelfGeneratedLicense.create(builder);
+        License license = SelfGeneratedLicense.create(builder, License.VERSION_CURRENT);

         XPackLicenseState licenseState = new XPackLicenseState(Settings.EMPTY);
         setInitialState(license, licenseState, Settings.EMPTY);

@@ -125,4 +125,4 @@ public class LicenseRegistrationTests extends AbstractLicenseServiceTestCase {
         assertEquals(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS, licenseMetaData.getLicense().expiryDate());
         assertEquals(uid, licenseMetaData.getLicense().uid());
     }
 }
@@ -111,7 +111,7 @@ public class LicenseSerializationTests extends ESTestCase {
                 .issueDate(now)
                 .type("basic")
                 .expiryDate(LicenseService.BASIC_SELF_GENERATED_LICENSE_EXPIRATION_MILLIS);
-        License license = SelfGeneratedLicense.create(specBuilder);
+        License license = SelfGeneratedLicense.create(specBuilder, License.VERSION_CURRENT);
         XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON);
         license.toXContent(builder, new ToXContent.MapParams(Collections.singletonMap(License.REST_VIEW_MODE, "true")));
         builder.flush();
@@ -95,7 +95,7 @@ public class LicensesMetaDataSerializationTests extends ESTestCase {
                 .issueDate(issueDate)
                 .type(randomBoolean() ? "trial" : "basic")
                 .expiryDate(issueDate + TimeValue.timeValueHours(2).getMillis());
-        final License trialLicense = SelfGeneratedLicense.create(specBuilder);
+        final License trialLicense = SelfGeneratedLicense.create(specBuilder, License.VERSION_CURRENT);
         LicensesMetaData licensesMetaData = new LicensesMetaData(trialLicense, Version.CURRENT);
         XContentBuilder builder = XContentFactory.jsonBuilder();
         builder.startObject();
@@ -34,7 +34,7 @@ public class SelfGeneratedLicenseTests extends ESTestCase {
                 .type(randomBoolean() ? "trial" : "basic")
                 .issueDate(issueDate)
                 .expiryDate(issueDate + TimeValue.timeValueHours(2).getMillis());
-        License trialLicense = SelfGeneratedLicense.create(specBuilder);
+        License trialLicense = SelfGeneratedLicense.create(specBuilder, License.VERSION_CURRENT);
         assertThat(SelfGeneratedLicense.verify(trialLicense), equalTo(true));
     }

@@ -47,7 +47,7 @@ public class SelfGeneratedLicenseTests extends ESTestCase {
                 .maxNodes(5)
                 .issueDate(issueDate)
                 .expiryDate(issueDate + TimeValue.timeValueHours(2).getMillis());
-        License trialLicense = SelfGeneratedLicense.create(specBuilder);
+        License trialLicense = SelfGeneratedLicense.create(specBuilder, License.VERSION_CURRENT);
         final String originalSignature = trialLicense.signature();
         License tamperedLicense = License.builder().fromLicenseSpec(trialLicense, originalSignature)
                 .expiryDate(System.currentTimeMillis() + TimeValue.timeValueHours(5).getMillis())

@@ -70,7 +70,8 @@ public class SelfGeneratedLicenseTests extends ESTestCase {
                 .issueDate(issueDate)
                 .expiryDate(issueDate + TimeValue.timeValueHours(2).getMillis());
         License pre20TrialLicense = specBuilder.build();
-        License license = SelfGeneratedLicense.create(License.builder().fromPre20LicenseSpec(pre20TrialLicense).type("trial"));
+        License license = SelfGeneratedLicense.create(License.builder().fromPre20LicenseSpec(pre20TrialLicense).type("trial"),
+                License.VERSION_CURRENT);
         assertThat(SelfGeneratedLicense.verify(license), equalTo(true));
     }
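The test call sites all switch to the explicit int overload, create(specBuilder, License.VERSION_CURRENT), rather than the new DiscoveryNodes variant: these serialization and verification tests care about a particular license version, not cluster composition, so pinning the version keeps them deterministic.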
@@ -7,7 +7,6 @@ package org.elasticsearch.xpack.security;

 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.util.SetOnce;
-import org.elasticsearch.ElasticsearchTimeoutException;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRequest;

@@ -17,7 +16,6 @@ import org.elasticsearch.action.support.DestructiveOperations;
 import org.elasticsearch.bootstrap.BootstrapCheck;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;

@@ -112,7 +110,6 @@ import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField;
 import org.elasticsearch.xpack.core.security.authc.DefaultAuthenticationFailureHandler;
 import org.elasticsearch.xpack.core.security.authc.Realm;
 import org.elasticsearch.xpack.core.security.authc.RealmSettings;
-import org.elasticsearch.xpack.core.security.authc.TokenMetaData;
 import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken;
 import org.elasticsearch.xpack.core.security.authz.AuthorizationServiceField;
 import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;

@@ -934,7 +931,8 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw
         if (enabled) {
             return new ValidateTLSOnJoin(XPackSettings.TRANSPORT_SSL_ENABLED.get(settings),
                 DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings))
-                .andThen(new ValidateUpgradedSecurityIndex());
+                .andThen(new ValidateUpgradedSecurityIndex())
+                .andThen(new ValidateLicenseCanBeDeserialized());
         }
         return null;
     }

@@ -971,6 +969,17 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw
         }
     }

+    static final class ValidateLicenseCanBeDeserialized implements BiConsumer<DiscoveryNode, ClusterState> {
+        @Override
+        public void accept(DiscoveryNode node, ClusterState state) {
+            License license = LicenseService.getLicense(state.metaData());
+            if (license != null && license.version() >= License.VERSION_CRYPTO_ALGORITHMS && node.getVersion().before(Version.V_6_4_0)) {
+                throw new IllegalStateException("node " + node + " is on version [" + node.getVersion() +
+                    "] that cannot deserialize the license format [" + license.version() + "], upgrade node to at least 6.4.0");
+            }
+        }
+    }
+
     @Override
     public void reloadSPI(ClassLoader loader) {
         securityExtensions.addAll(SecurityExtension.loadExtensions(loader));
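The join validator is a composed java.util.function.BiConsumer, so the new license check chains on with one more andThen(). A sketch of how the composition behaves (tlsCheck and indexCheck are stand-ins for the two existing validators created above):

    BiConsumer<DiscoveryNode, ClusterState> validator =
            tlsCheck.andThen(indexCheck)
                    .andThen(new Security.ValidateLicenseCanBeDeserialized());

    // Each stage runs in order for the joining node; the license stage throws
    // IllegalStateException when a pre-6.4.0 node tries to join a cluster whose
    // license already uses the 6.4.0+ signature format.
    validator.accept(joiningNode, clusterState);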
@@ -28,6 +28,7 @@ import org.elasticsearch.license.TestUtils;
 import org.elasticsearch.license.XPackLicenseState;
 import org.elasticsearch.plugins.MapperPlugin;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.VersionUtils;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.watcher.ResourceWatcherService;
 import org.elasticsearch.xpack.core.XPackSettings;

@@ -278,6 +279,19 @@ public class SecurityTests extends ESTestCase {
         }
     }

+    public void testJoinValidatorForLicenseDeserialization() throws Exception {
+        DiscoveryNode node = new DiscoveryNode("foo", buildNewFakeTransportAddress(),
+            VersionUtils.randomVersionBetween(random(), null, Version.V_6_3_0));
+        MetaData.Builder builder = MetaData.builder();
+        License license = TestUtils.generateSignedLicense(null,
+            randomIntBetween(License.VERSION_CRYPTO_ALGORITHMS, License.VERSION_CURRENT), -1, TimeValue.timeValueHours(24));
+        TestUtils.putLicense(builder, license);
+        ClusterState state = ClusterState.builder(ClusterName.DEFAULT).metaData(builder.build()).build();
+        IllegalStateException e = expectThrows(IllegalStateException.class,
+            () -> new Security.ValidateLicenseCanBeDeserialized().accept(node, state));
+        assertThat(e.getMessage(), containsString("cannot deserialize the license format"));
+    }
+
     public void testIndexJoinValidator_Old_And_Rolling() throws Exception {
         createComponents(Settings.EMPTY);
         BiConsumer<DiscoveryNode, ClusterState> joinValidator = security.getJoinValidator();

@@ -345,7 +359,7 @@ public class SecurityTests extends ESTestCase {
             .nodes(discoveryNodes).build();
         joinValidator.accept(node, clusterState);
     }

     public void testGetFieldFilterSecurityEnabled() throws Exception {
         createComponents(Settings.EMPTY);
         Function<String, Predicate<String>> fieldFilter = security.getFieldFilter();