Merge branch 'master' into feature/client_aggs_parsing
This commit is contained in:
commit
5541debd5a
|
@ -76,6 +76,8 @@ public class Version implements Comparable<Version> {
|
|||
public static final Version V_5_3_0_UNRELEASED = new Version(V_5_3_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_1);
|
||||
public static final int V_5_3_1_ID_UNRELEASED = 5030199;
|
||||
public static final Version V_5_3_1_UNRELEASED = new Version(V_5_3_1_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_2);
|
||||
public static final int V_5_3_2_ID_UNRELEASED = 5030299;
|
||||
public static final Version V_5_3_2_UNRELEASED = new Version(V_5_3_2_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_4_2);
|
||||
public static final int V_5_4_0_ID_UNRELEASED = 5040099;
|
||||
public static final Version V_5_4_0_UNRELEASED = new Version(V_5_4_0_ID_UNRELEASED, org.apache.lucene.util.Version.LUCENE_6_5_0);
|
||||
public static final int V_5_5_0_ID_UNRELEASED = 5050099;
|
||||
|
@ -104,6 +106,8 @@ public class Version implements Comparable<Version> {
|
|||
return V_5_5_0_UNRELEASED;
|
||||
case V_5_4_0_ID_UNRELEASED:
|
||||
return V_5_4_0_UNRELEASED;
|
||||
case V_5_3_2_ID_UNRELEASED:
|
||||
return V_5_3_2_UNRELEASED;
|
||||
case V_5_3_1_ID_UNRELEASED:
|
||||
return V_5_3_1_UNRELEASED;
|
||||
case V_5_3_0_ID_UNRELEASED:
|
||||
|
|
|
@ -52,19 +52,19 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.common.util.concurrent.ThreadContext;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.indices.IndexClosedException;
|
||||
import org.elasticsearch.node.NodeClosedException;
|
||||
import org.elasticsearch.ingest.IngestService;
|
||||
import org.elasticsearch.node.NodeClosedException;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
@ -75,6 +75,8 @@ import java.util.concurrent.atomic.AtomicInteger;
|
|||
import java.util.function.LongSupplier;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
|
||||
/**
|
||||
* Groups bulk request items by shard, optionally creating non-existent indices and
|
||||
* delegates to {@link TransportShardBulkAction} for shard-level bulk execution
|
||||
|
@ -139,30 +141,46 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
final AtomicArray<BulkItemResponse> responses = new AtomicArray<>(bulkRequest.requests.size());
|
||||
|
||||
if (needToCheck()) {
|
||||
// Keep track of all unique indices and all unique types per index for the create index requests:
|
||||
final Set<String> autoCreateIndices = bulkRequest.requests.stream()
|
||||
// Attempt to create all the indices that we're going to need during the bulk before we start.
|
||||
// Step 1: collect all the indices in the request
|
||||
final Set<String> indices = bulkRequest.requests.stream()
|
||||
.map(DocWriteRequest::index)
|
||||
.collect(Collectors.toSet());
|
||||
final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size());
|
||||
/* Step 2: filter that to indices that don't exist and we can create. At the same time build a map of indices we can't create
|
||||
* that we'll use when we try to run the requests. */
|
||||
final Map<String, IndexNotFoundException> indicesThatCannotBeCreated = new HashMap<>();
|
||||
Set<String> autoCreateIndices = new HashSet<>();
|
||||
ClusterState state = clusterService.state();
|
||||
for (String index : autoCreateIndices) {
|
||||
if (shouldAutoCreate(index, state)) {
|
||||
CreateIndexRequest createIndexRequest = new CreateIndexRequest();
|
||||
createIndexRequest.index(index);
|
||||
createIndexRequest.cause("auto(bulk api)");
|
||||
createIndexRequest.masterNodeTimeout(bulkRequest.timeout());
|
||||
createIndexAction.execute(createIndexRequest, new ActionListener<CreateIndexResponse>() {
|
||||
for (String index : indices) {
|
||||
boolean shouldAutoCreate;
|
||||
try {
|
||||
shouldAutoCreate = shouldAutoCreate(index, state);
|
||||
} catch (IndexNotFoundException e) {
|
||||
shouldAutoCreate = false;
|
||||
indicesThatCannotBeCreated.put(index, e);
|
||||
}
|
||||
if (shouldAutoCreate) {
|
||||
autoCreateIndices.add(index);
|
||||
}
|
||||
}
|
||||
// Step 3: create all the indices that are missing, if there are any missing. start the bulk after all the creates come back.
|
||||
if (autoCreateIndices.isEmpty()) {
|
||||
executeBulk(task, bulkRequest, startTime, listener, responses, indicesThatCannotBeCreated);
|
||||
} else {
|
||||
final AtomicInteger counter = new AtomicInteger(autoCreateIndices.size());
|
||||
for (String index : autoCreateIndices) {
|
||||
createIndex(index, bulkRequest.timeout(), new ActionListener<CreateIndexResponse>() {
|
||||
@Override
|
||||
public void onResponse(CreateIndexResponse result) {
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
executeBulk(task, bulkRequest, startTime, listener, responses);
|
||||
executeBulk(task, bulkRequest, startTime, listener, responses, indicesThatCannotBeCreated);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
if (!(ExceptionsHelper.unwrapCause(e) instanceof ResourceAlreadyExistsException)) {
|
||||
// fail all requests involving this index, if create didnt work
|
||||
// fail all requests involving this index, if create didn't work
|
||||
for (int i = 0; i < bulkRequest.requests.size(); i++) {
|
||||
DocWriteRequest request = bulkRequest.requests.get(i);
|
||||
if (request != null && setResponseFailureIfIndexMatches(responses, i, request, index, e)) {
|
||||
|
@ -174,18 +192,14 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
executeBulk(task, bulkRequest, startTime, ActionListener.wrap(listener::onResponse, inner -> {
|
||||
inner.addSuppressed(e);
|
||||
listener.onFailure(inner);
|
||||
}), responses);
|
||||
}), responses, indicesThatCannotBeCreated);
|
||||
}
|
||||
}
|
||||
});
|
||||
} else {
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
executeBulk(task, bulkRequest, startTime, listener, responses);
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
executeBulk(task, bulkRequest, startTime, listener, responses);
|
||||
executeBulk(task, bulkRequest, startTime, listener, responses, emptyMap());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -197,6 +211,14 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
return autoCreateIndex.shouldAutoCreate(index, state);
|
||||
}
|
||||
|
||||
void createIndex(String index, TimeValue timeout, ActionListener<CreateIndexResponse> listener) {
|
||||
CreateIndexRequest createIndexRequest = new CreateIndexRequest();
|
||||
createIndexRequest.index(index);
|
||||
createIndexRequest.cause("auto(bulk api)");
|
||||
createIndexRequest.masterNodeTimeout(timeout);
|
||||
createIndexAction.execute(createIndexRequest, listener);
|
||||
}
|
||||
|
||||
private boolean setResponseFailureIfIndexMatches(AtomicArray<BulkItemResponse> responses, int idx, DocWriteRequest request, String index, Exception e) {
|
||||
if (index.equals(request.index())) {
|
||||
responses.set(idx, new BulkItemResponse(idx, request.opType(), new BulkItemResponse.Failure(request.index(), request.type(), request.id(), e)));
|
||||
|
@ -220,14 +242,16 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
private final AtomicArray<BulkItemResponse> responses;
|
||||
private final long startTimeNanos;
|
||||
private final ClusterStateObserver observer;
|
||||
private final Map<String, IndexNotFoundException> indicesThatCannotBeCreated;
|
||||
|
||||
BulkOperation(Task task, BulkRequest bulkRequest, ActionListener<BulkResponse> listener,
|
||||
AtomicArray<BulkItemResponse> responses, long startTimeNanos) {
|
||||
BulkOperation(Task task, BulkRequest bulkRequest, ActionListener<BulkResponse> listener, AtomicArray<BulkItemResponse> responses,
|
||||
long startTimeNanos, Map<String, IndexNotFoundException> indicesThatCannotBeCreated) {
|
||||
this.task = task;
|
||||
this.bulkRequest = bulkRequest;
|
||||
this.listener = listener;
|
||||
this.responses = responses;
|
||||
this.startTimeNanos = startTimeNanos;
|
||||
this.indicesThatCannotBeCreated = indicesThatCannotBeCreated;
|
||||
this.observer = new ClusterStateObserver(clusterService, bulkRequest.timeout(), logger, threadPool.getThreadContext());
|
||||
}
|
||||
|
||||
|
@ -250,7 +274,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
if (docWriteRequest == null) {
|
||||
continue;
|
||||
}
|
||||
if (addFailureIfIndexIsUnavailable(docWriteRequest, bulkRequest, responses, i, concreteIndices, metaData)) {
|
||||
if (addFailureIfIndexIsUnavailable(docWriteRequest, i, concreteIndices, metaData)) {
|
||||
continue;
|
||||
}
|
||||
Index concreteIndex = concreteIndices.resolveIfAbsent(docWriteRequest);
|
||||
|
@ -393,42 +417,44 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
void executeBulk(Task task, final BulkRequest bulkRequest, final long startTimeNanos, final ActionListener<BulkResponse> listener, final AtomicArray<BulkItemResponse> responses ) {
|
||||
new BulkOperation(task, bulkRequest, listener, responses, startTimeNanos).run();
|
||||
}
|
||||
|
||||
private boolean addFailureIfIndexIsUnavailable(DocWriteRequest request, BulkRequest bulkRequest, AtomicArray<BulkItemResponse> responses, int idx,
|
||||
final ConcreteIndices concreteIndices,
|
||||
final MetaData metaData) {
|
||||
Index concreteIndex = concreteIndices.getConcreteIndex(request.index());
|
||||
Exception unavailableException = null;
|
||||
if (concreteIndex == null) {
|
||||
try {
|
||||
concreteIndex = concreteIndices.resolveIfAbsent(request);
|
||||
} catch (IndexClosedException | IndexNotFoundException ex) {
|
||||
// Fix for issue where bulk request references an index that
|
||||
// cannot be auto-created see issue #8125
|
||||
unavailableException = ex;
|
||||
private boolean addFailureIfIndexIsUnavailable(DocWriteRequest request, int idx, final ConcreteIndices concreteIndices,
|
||||
final MetaData metaData) {
|
||||
IndexNotFoundException cannotCreate = indicesThatCannotBeCreated.get(request.index());
|
||||
if (cannotCreate != null) {
|
||||
addFailure(request, idx, cannotCreate);
|
||||
return true;
|
||||
}
|
||||
Index concreteIndex = concreteIndices.getConcreteIndex(request.index());
|
||||
if (concreteIndex == null) {
|
||||
try {
|
||||
concreteIndex = concreteIndices.resolveIfAbsent(request);
|
||||
} catch (IndexClosedException | IndexNotFoundException ex) {
|
||||
addFailure(request, idx, ex);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (unavailableException == null) {
|
||||
IndexMetaData indexMetaData = metaData.getIndexSafe(concreteIndex);
|
||||
if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
|
||||
unavailableException = new IndexClosedException(concreteIndex);
|
||||
addFailure(request, idx, new IndexClosedException(concreteIndex));
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
if (unavailableException != null) {
|
||||
|
||||
private void addFailure(DocWriteRequest request, int idx, Exception unavailableException) {
|
||||
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(request.index(), request.type(), request.id(),
|
||||
unavailableException);
|
||||
BulkItemResponse bulkItemResponse = new BulkItemResponse(idx, request.opType(), failure);
|
||||
responses.set(idx, bulkItemResponse);
|
||||
// make sure the request gets never processed again
|
||||
bulkRequest.requests.set(idx, null);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
void executeBulk(Task task, final BulkRequest bulkRequest, final long startTimeNanos, final ActionListener<BulkResponse> listener,
|
||||
final AtomicArray<BulkItemResponse> responses, Map<String, IndexNotFoundException> indicesThatCannotBeCreated) {
|
||||
new BulkOperation(task, bulkRequest, listener, responses, startTimeNanos, indicesThatCannotBeCreated).run();
|
||||
}
|
||||
|
||||
private static class ConcreteIndices {
|
||||
|
|
|
@ -415,7 +415,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
|
|||
static {
|
||||
assert Version.CURRENT.minimumCompatibilityVersion().after(Version.V_5_0_0) == false:
|
||||
"Remove logic handling NoOp result from primary response; see TODO in replicaItemExecutionMode" +
|
||||
" as the current minimum compatible version [" +
|
||||
" as the current minimum compatible version [" +
|
||||
Version.CURRENT.minimumCompatibilityVersion() + "] is after 5.0";
|
||||
}
|
||||
|
||||
|
@ -565,7 +565,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
|
|||
final long version = primaryResponse.getVersion();
|
||||
final long seqNo = primaryResponse.getSeqNo();
|
||||
final SourceToParse sourceToParse =
|
||||
SourceToParse.source(SourceToParse.Origin.REPLICA, shardId.getIndexName(),
|
||||
SourceToParse.source(shardId.getIndexName(),
|
||||
request.type(), request.id(), request.source(), request.getContentType())
|
||||
.routing(request.routing()).parent(request.parent());
|
||||
final VersionType versionType = request.versionType().versionTypeForReplicationAndRecovery();
|
||||
|
@ -578,7 +578,7 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
|
|||
/** Utility method to prepare an index operation on primary shards */
|
||||
private static Engine.Index prepareIndexOperationOnPrimary(IndexRequest request, IndexShard primary) {
|
||||
final SourceToParse sourceToParse =
|
||||
SourceToParse.source(SourceToParse.Origin.PRIMARY, request.index(), request.type(),
|
||||
SourceToParse.source(request.index(), request.type(),
|
||||
request.id(), request.source(), request.getContentType())
|
||||
.routing(request.routing()).parent(request.parent());
|
||||
return primary.prepareIndexOnPrimary(sourceToParse, request.version(), request.versionType(),
|
||||
|
|
|
@ -799,8 +799,7 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
|
|||
executable.setNextVar("ctx", context);
|
||||
executable.run();
|
||||
|
||||
Map<String, Object> resultCtx = (Map<String, Object>) executable.unwrap(context);
|
||||
String newOp = (String) resultCtx.remove("op");
|
||||
String newOp = (String) context.remove("op");
|
||||
if (newOp == null) {
|
||||
throw new IllegalArgumentException("Script cleared operation type");
|
||||
}
|
||||
|
@ -809,25 +808,25 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
|
|||
* It'd be lovely to only set the source if we know its been modified
|
||||
* but it isn't worth keeping two copies of it around just to check!
|
||||
*/
|
||||
request.setSource((Map<String, Object>) resultCtx.remove(SourceFieldMapper.NAME));
|
||||
request.setSource((Map<String, Object>) context.remove(SourceFieldMapper.NAME));
|
||||
|
||||
Object newValue = resultCtx.remove(IndexFieldMapper.NAME);
|
||||
Object newValue = context.remove(IndexFieldMapper.NAME);
|
||||
if (false == doc.getIndex().equals(newValue)) {
|
||||
scriptChangedIndex(request, newValue);
|
||||
}
|
||||
newValue = resultCtx.remove(TypeFieldMapper.NAME);
|
||||
newValue = context.remove(TypeFieldMapper.NAME);
|
||||
if (false == doc.getType().equals(newValue)) {
|
||||
scriptChangedType(request, newValue);
|
||||
}
|
||||
newValue = resultCtx.remove(IdFieldMapper.NAME);
|
||||
newValue = context.remove(IdFieldMapper.NAME);
|
||||
if (false == doc.getId().equals(newValue)) {
|
||||
scriptChangedId(request, newValue);
|
||||
}
|
||||
newValue = resultCtx.remove(VersionFieldMapper.NAME);
|
||||
newValue = context.remove(VersionFieldMapper.NAME);
|
||||
if (false == Objects.equals(oldVersion, newValue)) {
|
||||
scriptChangedVersion(request, newValue);
|
||||
}
|
||||
newValue = resultCtx.remove(ParentFieldMapper.NAME);
|
||||
newValue = context.remove(ParentFieldMapper.NAME);
|
||||
if (false == Objects.equals(oldParent, newValue)) {
|
||||
scriptChangedParent(request, newValue);
|
||||
}
|
||||
|
@ -835,7 +834,7 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
|
|||
* Its important that routing comes after parent in case you want to
|
||||
* change them both.
|
||||
*/
|
||||
newValue = resultCtx.remove(RoutingFieldMapper.NAME);
|
||||
newValue = context.remove(RoutingFieldMapper.NAME);
|
||||
if (false == Objects.equals(oldRouting, newValue)) {
|
||||
scriptChangedRouting(request, newValue);
|
||||
}
|
||||
|
@ -845,8 +844,8 @@ public abstract class AbstractAsyncBulkByScrollAction<Request extends AbstractBu
|
|||
return scriptChangedOpType(request, oldOpType, newOpType);
|
||||
}
|
||||
|
||||
if (false == resultCtx.isEmpty()) {
|
||||
throw new IllegalArgumentException("Invalid fields added to context [" + String.join(",", resultCtx.keySet()) + ']');
|
||||
if (false == context.isEmpty()) {
|
||||
throw new IllegalArgumentException("Invalid fields added to context [" + String.join(",", context.keySet()) + ']');
|
||||
}
|
||||
return request;
|
||||
}
|
||||
|
|
|
@ -43,6 +43,7 @@ import org.elasticsearch.index.mapper.ParentFieldMapper;
|
|||
import org.elasticsearch.index.mapper.RoutingFieldMapper;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.script.CompiledScript;
|
||||
import org.elasticsearch.script.ExecutableScript;
|
||||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.script.ScriptContext;
|
||||
|
@ -213,11 +214,10 @@ public class UpdateHelper extends AbstractComponent {
|
|||
private Map<String, Object> executeScript(Script script, Map<String, Object> ctx) {
|
||||
try {
|
||||
if (scriptService != null) {
|
||||
ExecutableScript executableScript = scriptService.executable(script, ScriptContext.Standard.UPDATE);
|
||||
CompiledScript compiledScript = scriptService.compile(script, ScriptContext.Standard.UPDATE);
|
||||
ExecutableScript executableScript = scriptService.executable(compiledScript, script.getParams());
|
||||
executableScript.setNextVar("ctx", ctx);
|
||||
executableScript.run();
|
||||
// we need to unwrap the ctx...
|
||||
ctx = (Map<String, Object>) executableScript.unwrap(ctx);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
throw new IllegalArgumentException("failed to execute script", e);
|
||||
|
|
|
@ -1415,6 +1415,14 @@ public abstract class Engine implements Closeable {
|
|||
*/
|
||||
public abstract void deactivateThrottling();
|
||||
|
||||
/**
|
||||
* Fills up the local checkpoints history with no-ops until the local checkpoint
|
||||
* and the max seen sequence ID are identical.
|
||||
* @param primaryTerm the shards primary term this engine was created for
|
||||
* @return the number of no-ops added
|
||||
*/
|
||||
public abstract int fillSequenceNumberHistory(long primaryTerm) throws IOException;
|
||||
|
||||
/**
|
||||
* Performs recovery from the transaction log.
|
||||
* This operation will close the engine if the recovery fails.
|
||||
|
|
|
@ -27,7 +27,6 @@ import org.apache.lucene.search.QueryCachingPolicy;
|
|||
import org.apache.lucene.search.ReferenceManager;
|
||||
import org.apache.lucene.search.Sort;
|
||||
import org.apache.lucene.search.similarities.Similarity;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
|
@ -67,7 +66,6 @@ public final class EngineConfig {
|
|||
private final Engine.EventListener eventListener;
|
||||
private final QueryCache queryCache;
|
||||
private final QueryCachingPolicy queryCachingPolicy;
|
||||
private final long maxUnsafeAutoIdTimestamp;
|
||||
@Nullable
|
||||
private final ReferenceManager.RefreshListener refreshListeners;
|
||||
@Nullable
|
||||
|
@ -116,7 +114,7 @@ public final class EngineConfig {
|
|||
Similarity similarity, CodecService codecService, Engine.EventListener eventListener,
|
||||
TranslogRecoveryPerformer translogRecoveryPerformer, QueryCache queryCache, QueryCachingPolicy queryCachingPolicy,
|
||||
TranslogConfig translogConfig, TimeValue flushMergesAfter, ReferenceManager.RefreshListener refreshListeners,
|
||||
long maxUnsafeAutoIdTimestamp, Sort indexSort) {
|
||||
Sort indexSort) {
|
||||
if (openMode == null) {
|
||||
throw new IllegalArgumentException("openMode must not be null");
|
||||
}
|
||||
|
@ -143,9 +141,6 @@ public final class EngineConfig {
|
|||
this.flushMergesAfter = flushMergesAfter;
|
||||
this.openMode = openMode;
|
||||
this.refreshListeners = refreshListeners;
|
||||
assert maxUnsafeAutoIdTimestamp >= IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP :
|
||||
"maxUnsafeAutoIdTimestamp must be >= -1 but was " + maxUnsafeAutoIdTimestamp;
|
||||
this.maxUnsafeAutoIdTimestamp = maxUnsafeAutoIdTimestamp;
|
||||
this.indexSort = indexSort;
|
||||
}
|
||||
|
||||
|
@ -333,11 +328,10 @@ public final class EngineConfig {
|
|||
}
|
||||
|
||||
/**
|
||||
* Returns the max timestamp that is used to de-optimize documents with auto-generated IDs in the engine.
|
||||
* This is used to ensure we don't add duplicate documents when we assume an append only case based on auto-generated IDs
|
||||
* returns true if the engine is allowed to optimize indexing operations with an auto-generated ID
|
||||
*/
|
||||
public long getMaxUnsafeAutoIdTimestamp() {
|
||||
return indexSettings.getValue(INDEX_OPTIMIZE_AUTO_GENERATED_IDS) ? maxUnsafeAutoIdTimestamp : Long.MAX_VALUE;
|
||||
public boolean isAutoGeneratedIDsOptimizationEnabled() {
|
||||
return indexSettings.getValue(INDEX_OPTIMIZE_AUTO_GENERATED_IDS);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -128,7 +128,7 @@ public class InternalEngine extends Engine {
|
|||
private final AtomicInteger throttleRequestCount = new AtomicInteger();
|
||||
private final EngineConfig.OpenMode openMode;
|
||||
private final AtomicBoolean pendingTranslogRecovery = new AtomicBoolean(false);
|
||||
private static final String MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID = "max_unsafe_auto_id_timestamp";
|
||||
public static final String MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID = "max_unsafe_auto_id_timestamp";
|
||||
private final AtomicLong maxUnsafeAutoIdTimestamp = new AtomicLong(-1);
|
||||
private final CounterMetric numVersionLookups = new CounterMetric();
|
||||
private final CounterMetric numIndexVersionsLookups = new CounterMetric();
|
||||
|
@ -136,11 +136,8 @@ public class InternalEngine extends Engine {
|
|||
public InternalEngine(EngineConfig engineConfig) throws EngineException {
|
||||
super(engineConfig);
|
||||
openMode = engineConfig.getOpenMode();
|
||||
if (engineConfig.getIndexSettings().getIndexVersionCreated().before(Version.V_5_0_0_beta1)) {
|
||||
// no optimization for pre 5.0.0.alpha6 since translog might not have all information needed
|
||||
if (engineConfig.isAutoGeneratedIDsOptimizationEnabled() == false) {
|
||||
maxUnsafeAutoIdTimestamp.set(Long.MAX_VALUE);
|
||||
} else {
|
||||
maxUnsafeAutoIdTimestamp.set(engineConfig.getMaxUnsafeAutoIdTimestamp());
|
||||
}
|
||||
this.versionMap = new LiveVersionMap();
|
||||
store.incRef();
|
||||
|
@ -228,6 +225,28 @@ public class InternalEngine extends Engine {
|
|||
logger.trace("created new InternalEngine");
|
||||
}
|
||||
|
||||
@Override
|
||||
public int fillSequenceNumberHistory(long primaryTerm) throws IOException {
|
||||
try (ReleasableLock lock = writeLock.acquire()) {
|
||||
ensureOpen();
|
||||
final long localCheckpoint = seqNoService.getLocalCheckpoint();
|
||||
final long maxSeqId = seqNoService.getMaxSeqNo();
|
||||
int numNoOpsAdded = 0;
|
||||
for (long seqNo = localCheckpoint + 1; seqNo <= maxSeqId;
|
||||
// the local checkpoint might have been advanced so we are leap-frogging
|
||||
// to the next seq ID we need to process and create a noop for
|
||||
seqNo = seqNoService.getLocalCheckpoint()+1) {
|
||||
final NoOp noOp = new NoOp(seqNo, primaryTerm, Operation.Origin.PRIMARY, System.nanoTime(), "filling up seqNo history");
|
||||
innerNoOp(noOp);
|
||||
numNoOpsAdded++;
|
||||
assert seqNo <= seqNoService.getLocalCheckpoint() : "localCheckpoint didn't advanced used to be " + seqNo + " now it's on:"
|
||||
+ seqNoService.getLocalCheckpoint();
|
||||
|
||||
}
|
||||
return numNoOpsAdded;
|
||||
}
|
||||
}
|
||||
|
||||
private void updateMaxUnsafeAutoIdTimestampFromWriter(IndexWriter writer) {
|
||||
long commitMaxUnsafeAutoIdTimestamp = Long.MIN_VALUE;
|
||||
for (Map.Entry<String, String> entry : writer.getLiveCommitData()) {
|
||||
|
@ -1074,6 +1093,7 @@ public class InternalEngine extends Engine {
|
|||
}
|
||||
|
||||
private NoOpResult innerNoOp(final NoOp noOp) throws IOException {
|
||||
assert readLock.isHeldByCurrentThread() || writeLock.isHeldByCurrentThread();
|
||||
assert noOp.seqNo() > SequenceNumbersService.NO_OPS_PERFORMED;
|
||||
final long seqNo = noOp.seqNo();
|
||||
try {
|
||||
|
@ -1836,7 +1856,7 @@ public class InternalEngine extends Engine {
|
|||
mergeScheduler.refreshConfig();
|
||||
// config().isEnableGcDeletes() or config.getGcDeletesInMillis() may have changed:
|
||||
maybePruneDeletedTombstones();
|
||||
if (engineConfig.getMaxUnsafeAutoIdTimestamp() == Long.MAX_VALUE) {
|
||||
if (engineConfig.isAutoGeneratedIDsOptimizationEnabled() == false) {
|
||||
// this is an anti-viral settings you can only opt out for the entire index
|
||||
// only if a shard starts up again due to relocation or if the index is closed
|
||||
// the setting will be re-interpreted if it's set to true
|
||||
|
|
|
@ -23,22 +23,15 @@ import java.util.Objects;
|
|||
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
|
||||
public class SourceToParse {
|
||||
|
||||
public static SourceToParse source(String index, String type, String id, BytesReference source, XContentType contentType) {
|
||||
return source(Origin.PRIMARY, index, type, id, source, contentType);
|
||||
}
|
||||
|
||||
public static SourceToParse source(Origin origin, String index, String type, String id, BytesReference source,
|
||||
public static SourceToParse source(String index, String type, String id, BytesReference source,
|
||||
XContentType contentType) {
|
||||
return new SourceToParse(origin, index, type, id, source, contentType);
|
||||
return new SourceToParse(index, type, id, source, contentType);
|
||||
}
|
||||
|
||||
private final Origin origin;
|
||||
|
||||
private final BytesReference source;
|
||||
|
||||
private final String index;
|
||||
|
@ -53,8 +46,7 @@ public class SourceToParse {
|
|||
|
||||
private XContentType xContentType;
|
||||
|
||||
private SourceToParse(Origin origin, String index, String type, String id, BytesReference source, XContentType xContentType) {
|
||||
this.origin = Objects.requireNonNull(origin);
|
||||
private SourceToParse(String index, String type, String id, BytesReference source, XContentType xContentType) {
|
||||
this.index = Objects.requireNonNull(index);
|
||||
this.type = Objects.requireNonNull(type);
|
||||
this.id = Objects.requireNonNull(id);
|
||||
|
@ -64,10 +56,6 @@ public class SourceToParse {
|
|||
this.xContentType = Objects.requireNonNull(xContentType);
|
||||
}
|
||||
|
||||
public Origin origin() {
|
||||
return origin;
|
||||
}
|
||||
|
||||
public BytesReference source() {
|
||||
return this.source;
|
||||
}
|
||||
|
|
|
@ -25,6 +25,7 @@ import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
|||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.script.CompiledScript;
|
||||
import org.elasticsearch.script.ExecutableScript;
|
||||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.script.ScriptContext;
|
||||
|
@ -104,7 +105,8 @@ public class QueryRewriteContext {
|
|||
}
|
||||
|
||||
public BytesReference getTemplateBytes(Script template) {
|
||||
ExecutableScript executable = scriptService.executable(template, ScriptContext.Standard.SEARCH);
|
||||
CompiledScript compiledTemplate = scriptService.compile(template, ScriptContext.Standard.SEARCH);
|
||||
ExecutableScript executable = scriptService.executable(compiledTemplate, template.getParams());
|
||||
return (BytesReference) executable.run();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -351,7 +351,8 @@ public class QueryShardContext extends QueryRewriteContext {
|
|||
*/
|
||||
public final ExecutableScript getExecutableScript(Script script, ScriptContext context) {
|
||||
failIfFrozen();
|
||||
return scriptService.executable(script, context);
|
||||
CompiledScript compiledScript = scriptService.compile(script, context);
|
||||
return scriptService.executable(compiledScript, script.getParams());
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -27,6 +27,7 @@ import org.apache.lucene.index.IndexFormatTooNewException;
|
|||
import org.apache.lucene.index.IndexFormatTooOldException;
|
||||
import org.apache.lucene.index.IndexWriter;
|
||||
import org.apache.lucene.index.KeepOnlyLastCommitDeletionPolicy;
|
||||
import org.apache.lucene.index.SegmentInfos;
|
||||
import org.apache.lucene.index.SnapshotDeletionPolicy;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.search.Query;
|
||||
|
@ -38,11 +39,11 @@ import org.apache.lucene.store.Lock;
|
|||
import org.apache.lucene.util.IOUtils;
|
||||
import org.apache.lucene.util.ThreadInterruptedException;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
|
||||
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeRequest;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MappingMetaData;
|
||||
import org.elasticsearch.cluster.routing.RecoverySource;
|
||||
|
@ -79,6 +80,7 @@ import org.elasticsearch.index.engine.Engine;
|
|||
import org.elasticsearch.index.engine.EngineConfig;
|
||||
import org.elasticsearch.index.engine.EngineException;
|
||||
import org.elasticsearch.index.engine.EngineFactory;
|
||||
import org.elasticsearch.index.engine.InternalEngine;
|
||||
import org.elasticsearch.index.engine.InternalEngineFactory;
|
||||
import org.elasticsearch.index.engine.RefreshFailedEngineException;
|
||||
import org.elasticsearch.index.engine.Segment;
|
||||
|
@ -1040,11 +1042,11 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
|
|||
translogStats.totalOperations(0);
|
||||
translogStats.totalOperationsOnStart(0);
|
||||
}
|
||||
internalPerformTranslogRecovery(false, indexExists, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP);
|
||||
internalPerformTranslogRecovery(false, indexExists);
|
||||
assert recoveryState.getStage() == RecoveryState.Stage.TRANSLOG : "TRANSLOG stage expected but was: " + recoveryState.getStage();
|
||||
}
|
||||
|
||||
private void internalPerformTranslogRecovery(boolean skipTranslogRecovery, boolean indexExists, long maxUnsafeAutoIdTimestamp) throws IOException {
|
||||
private void internalPerformTranslogRecovery(boolean skipTranslogRecovery, boolean indexExists) throws IOException {
|
||||
if (state != IndexShardState.RECOVERING) {
|
||||
throw new IndexShardNotRecoveringException(shardId, state);
|
||||
}
|
||||
|
@ -1073,7 +1075,10 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
|
|||
} else {
|
||||
openMode = EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG;
|
||||
}
|
||||
final EngineConfig config = newEngineConfig(openMode, maxUnsafeAutoIdTimestamp);
|
||||
|
||||
assert indexExists == false || assertMaxUnsafeAutoIdInCommit();
|
||||
|
||||
final EngineConfig config = newEngineConfig(openMode);
|
||||
// we disable deletes since we allow for operations to be executed against the shard while recovering
|
||||
// but we need to make sure we don't loose deletes until we are done recovering
|
||||
config.setEnableGcDeletes(false);
|
||||
|
@ -1087,6 +1092,22 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
|
|||
}
|
||||
}
|
||||
|
||||
private boolean assertMaxUnsafeAutoIdInCommit() throws IOException {
|
||||
final Map<String, String> userData = SegmentInfos.readLatestCommit(store.directory()).getUserData();
|
||||
if (recoveryState().getRecoverySource().getType() == RecoverySource.Type.PEER) {
|
||||
// as of 5.5.0, the engine stores the maxUnsafeAutoIdTimestamp in the commit point.
|
||||
// This should have baked into the commit by the primary we recover from, regardless of the index age.
|
||||
assert userData.containsKey(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID) :
|
||||
"recovery from remote but " + InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID + " is not found in commit";
|
||||
} else if (recoveryState().getRecoverySource().getType() == RecoverySource.Type.EXISTING_STORE &&
|
||||
indexSettings.getIndexVersionCreated().onOrAfter(Version.V_5_5_0_UNRELEASED)) {
|
||||
assert userData.containsKey(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID) :
|
||||
"opening index which was created post 5.5.0 but " + InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID
|
||||
+ " is not found in commit";
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
protected void onNewEngine(Engine newEngine) {
|
||||
refreshListeners.setTranslog(newEngine.getTranslog());
|
||||
}
|
||||
|
@ -1096,9 +1117,9 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
|
|||
* the replay of the transaction log which is required in cases where we restore a previous index or recover from
|
||||
* a remote peer.
|
||||
*/
|
||||
public void skipTranslogRecovery(long maxUnsafeAutoIdTimestamp) throws IOException {
|
||||
public void skipTranslogRecovery() throws IOException {
|
||||
assert getEngineOrNull() == null : "engine was already created";
|
||||
internalPerformTranslogRecovery(true, true, maxUnsafeAutoIdTimestamp);
|
||||
internalPerformTranslogRecovery(true, true);
|
||||
assert recoveryState.getTranslog().recoveredOperations() == 0;
|
||||
}
|
||||
|
||||
|
@ -1795,14 +1816,13 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
|
|||
return mapperService.documentMapperWithAutoCreate(type);
|
||||
}
|
||||
|
||||
private EngineConfig newEngineConfig(EngineConfig.OpenMode openMode, long maxUnsafeAutoIdTimestamp) {
|
||||
private EngineConfig newEngineConfig(EngineConfig.OpenMode openMode) {
|
||||
final IndexShardRecoveryPerformer translogRecoveryPerformer = new IndexShardRecoveryPerformer(shardId, mapperService, logger);
|
||||
Sort indexSort = indexSortSupplier.get();
|
||||
return new EngineConfig(openMode, shardId,
|
||||
threadPool, indexSettings, warmer, store, deletionPolicy, indexSettings.getMergePolicy(),
|
||||
mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig,
|
||||
IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), refreshListeners,
|
||||
maxUnsafeAutoIdTimestamp, indexSort);
|
||||
IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()), refreshListeners, indexSort);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -31,7 +31,6 @@ import org.apache.lucene.store.FilterDirectory;
|
|||
import org.apache.lucene.store.IOContext;
|
||||
import org.apache.lucene.store.IndexInput;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MappingMetaData;
|
||||
import org.elasticsearch.cluster.routing.RecoverySource;
|
||||
|
@ -353,7 +352,7 @@ final class StoreRecovery {
|
|||
recoveryState.getIndex().updateVersion(version);
|
||||
if (recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) {
|
||||
assert indexShouldExists;
|
||||
indexShard.skipTranslogRecovery(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP);
|
||||
indexShard.skipTranslogRecovery();
|
||||
} else {
|
||||
// since we recover from local, just fill the files and size
|
||||
try {
|
||||
|
@ -365,6 +364,8 @@ final class StoreRecovery {
|
|||
logger.debug("failed to list file details", e);
|
||||
}
|
||||
indexShard.performTranslogRecovery(indexShouldExists);
|
||||
assert indexShard.shardRouting.primary() : "only primary shards can recover from store";
|
||||
indexShard.getEngine().fillSequenceNumberHistory(indexShard.getPrimaryTerm());
|
||||
}
|
||||
indexShard.finalizeRecovery();
|
||||
indexShard.postRecovery("post recovery from shard_store");
|
||||
|
@ -405,7 +406,7 @@ final class StoreRecovery {
|
|||
}
|
||||
final IndexId indexId = repository.getRepositoryData().resolveIndexId(indexName);
|
||||
repository.restoreShard(indexShard, restoreSource.snapshot().getSnapshotId(), restoreSource.version(), indexId, snapshotShardId, indexShard.recoveryState());
|
||||
indexShard.skipTranslogRecovery(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP);
|
||||
indexShard.skipTranslogRecovery();
|
||||
indexShard.finalizeRecovery();
|
||||
indexShard.postRecovery("restore done");
|
||||
} catch (Exception e) {
|
||||
|
|
|
@ -377,7 +377,7 @@ public class PeerRecoveryTargetService extends AbstractComponent implements Inde
|
|||
public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel) throws Exception {
|
||||
try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId()
|
||||
)) {
|
||||
recoveryRef.target().prepareForTranslogOperations(request.totalTranslogOps(), request.getMaxUnsafeAutoIdTimestamp());
|
||||
recoveryRef.target().prepareForTranslogOperations(request.totalTranslogOps());
|
||||
}
|
||||
channel.sendResponse(TransportResponse.Empty.INSTANCE);
|
||||
}
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
package org.elasticsearch.indices.recovery;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
|
@ -29,7 +30,6 @@ import java.io.IOException;
|
|||
|
||||
public class RecoveryPrepareForTranslogOperationsRequest extends TransportRequest {
|
||||
|
||||
private long maxUnsafeAutoIdTimestamp = IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP;
|
||||
private long recoveryId;
|
||||
private ShardId shardId;
|
||||
private int totalTranslogOps = RecoveryState.Translog.UNKNOWN;
|
||||
|
@ -37,11 +37,10 @@ public class RecoveryPrepareForTranslogOperationsRequest extends TransportReques
|
|||
public RecoveryPrepareForTranslogOperationsRequest() {
|
||||
}
|
||||
|
||||
RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId, int totalTranslogOps, long maxUnsafeAutoIdTimestamp) {
|
||||
RecoveryPrepareForTranslogOperationsRequest(long recoveryId, ShardId shardId, int totalTranslogOps) {
|
||||
this.recoveryId = recoveryId;
|
||||
this.shardId = shardId;
|
||||
this.totalTranslogOps = totalTranslogOps;
|
||||
this.maxUnsafeAutoIdTimestamp = maxUnsafeAutoIdTimestamp;
|
||||
}
|
||||
|
||||
public long recoveryId() {
|
||||
|
@ -56,17 +55,15 @@ public class RecoveryPrepareForTranslogOperationsRequest extends TransportReques
|
|||
return totalTranslogOps;
|
||||
}
|
||||
|
||||
public long getMaxUnsafeAutoIdTimestamp() {
|
||||
return maxUnsafeAutoIdTimestamp;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
recoveryId = in.readLong();
|
||||
shardId = ShardId.readShardId(in);
|
||||
totalTranslogOps = in.readVInt();
|
||||
maxUnsafeAutoIdTimestamp = in.readLong();
|
||||
if (in.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
in.readLong(); // maxUnsafeAutoIdTimestamp
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -75,6 +72,8 @@ public class RecoveryPrepareForTranslogOperationsRequest extends TransportReques
|
|||
out.writeLong(recoveryId);
|
||||
shardId.writeTo(out);
|
||||
out.writeVInt(totalTranslogOps);
|
||||
out.writeLong(maxUnsafeAutoIdTimestamp);
|
||||
if (out.getVersion().before(Version.V_6_0_0_alpha1_UNRELEASED)) {
|
||||
out.writeLong(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP); // maxUnsafeAutoIdTimestamp
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -157,7 +157,7 @@ public class RecoverySourceHandler {
|
|||
}
|
||||
|
||||
try {
|
||||
prepareTargetForTranslog(translogView.totalOperations(), shard.segmentStats(false).getMaxUnsafeAutoIdTimestamp());
|
||||
prepareTargetForTranslog(translogView.totalOperations());
|
||||
} catch (final Exception e) {
|
||||
throw new RecoveryEngineException(shard.shardId(), 1, "prepare target for translog failed", e);
|
||||
}
|
||||
|
@ -389,13 +389,13 @@ public class RecoverySourceHandler {
|
|||
}
|
||||
}
|
||||
|
||||
void prepareTargetForTranslog(final int totalTranslogOps, final long maxUnsafeAutoIdTimestamp) throws IOException {
|
||||
void prepareTargetForTranslog(final int totalTranslogOps) throws IOException {
|
||||
StopWatch stopWatch = new StopWatch().start();
|
||||
logger.trace("recovery [phase1]: prepare remote engine for translog");
|
||||
final long startEngineStart = stopWatch.totalTime().millis();
|
||||
// Send a request preparing the new shard's translog to receive operations. This ensures the shard engine is started and disables
|
||||
// garbage collection (not the JVM's GC!) of tombstone deletes.
|
||||
cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(totalTranslogOps, maxUnsafeAutoIdTimestamp));
|
||||
cancellableThreads.executeIO(() -> recoveryTarget.prepareForTranslogOperations(totalTranslogOps));
|
||||
stopWatch.stop();
|
||||
|
||||
response.startTime = stopWatch.totalTime().millis() - startEngineStart;
|
||||
|
|
|
@ -36,7 +36,6 @@ import org.elasticsearch.common.UUIDs;
|
|||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.Callback;
|
||||
import org.elasticsearch.common.util.CancellableThreads;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRefCounted;
|
||||
|
@ -49,7 +48,6 @@ import org.elasticsearch.index.store.StoreFileMetaData;
|
|||
import org.elasticsearch.index.translog.Translog;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.Iterator;
|
||||
|
@ -58,8 +56,6 @@ import java.util.Map;
|
|||
import java.util.Map.Entry;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.TimeoutException;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicLong;
|
||||
|
||||
|
@ -360,9 +356,9 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
|
|||
/*** Implementation of {@link RecoveryTargetHandler } */
|
||||
|
||||
@Override
|
||||
public void prepareForTranslogOperations(int totalTranslogOps, long maxUnsafeAutoIdTimestamp) throws IOException {
|
||||
public void prepareForTranslogOperations(int totalTranslogOps) throws IOException {
|
||||
state().getTranslog().totalOperations(totalTranslogOps);
|
||||
indexShard().skipTranslogRecovery(maxUnsafeAutoIdTimestamp);
|
||||
indexShard().skipTranslogRecovery();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -33,10 +33,8 @@ public interface RecoveryTargetHandler {
|
|||
* Prepares the target to receive translog operations, after all file have been copied
|
||||
*
|
||||
* @param totalTranslogOps total translog operations expected to be sent
|
||||
* @param maxUnsafeAutoIdTimestamp the max timestamp that is used to de-optimize documents with auto-generated IDs in the engine.
|
||||
* This is used to ensure we don't add duplicate documents when we assume an append only case based on auto-generated IDs
|
||||
*/
|
||||
void prepareForTranslogOperations(int totalTranslogOps, long maxUnsafeAutoIdTimestamp) throws IOException;
|
||||
void prepareForTranslogOperations(int totalTranslogOps) throws IOException;
|
||||
|
||||
/**
|
||||
* The finalize request refreshes the engine now that new segments are available, enables garbage collection of tombstone files, and
|
||||
|
|
|
@ -78,9 +78,9 @@ public class RemoteRecoveryTargetHandler implements RecoveryTargetHandler {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void prepareForTranslogOperations(int totalTranslogOps, long maxUnsafeAutoIdTimestamp) throws IOException {
|
||||
public void prepareForTranslogOperations(int totalTranslogOps) throws IOException {
|
||||
transportService.submitRequest(targetNode, PeerRecoveryTargetService.Actions.PREPARE_TRANSLOG,
|
||||
new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps, maxUnsafeAutoIdTimestamp),
|
||||
new RecoveryPrepareForTranslogOperationsRequest(recoveryId, shardId, totalTranslogOps),
|
||||
TransportRequestOptions.builder().withTimeout(recoverySettings.internalActionTimeout()).build(),
|
||||
EmptyTransportResponseHandler.INSTANCE_SAME).txGet();
|
||||
}
|
||||
|
|
|
@ -22,7 +22,6 @@ package org.elasticsearch.plugins;
|
|||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.cli.Terminal;
|
||||
import org.elasticsearch.cli.Terminal.Verbosity;
|
||||
import org.elasticsearch.env.Environment;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
|
|
|
@ -20,8 +20,6 @@
|
|||
package org.elasticsearch.plugins;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.logging.log4j.util.Supplier;
|
||||
import org.apache.lucene.analysis.util.CharFilterFactory;
|
||||
import org.apache.lucene.analysis.util.TokenFilterFactory;
|
||||
import org.apache.lucene.analysis.util.TokenizerFactory;
|
||||
|
@ -36,7 +34,6 @@ import org.elasticsearch.common.collect.Tuple;
|
|||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.component.LifecycleComponent;
|
||||
import org.elasticsearch.common.inject.Module;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
|
@ -45,21 +42,19 @@ import org.elasticsearch.index.IndexModule;
|
|||
import org.elasticsearch.threadpool.ExecutorBuilder;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.lang.reflect.InvocationTargetException;
|
||||
import java.lang.reflect.Method;
|
||||
import java.net.URL;
|
||||
import java.net.URLClassLoader;
|
||||
import java.nio.file.DirectoryStream;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.LinkedHashSet;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
import java.util.Set;
|
||||
|
@ -310,6 +305,19 @@ public class PluginsService extends AbstractComponent {
|
|||
throw new IllegalStateException("Could not load plugin descriptor for existing plugin ["
|
||||
+ plugin.getFileName() + "]. Was the plugin built before 2.0?", e);
|
||||
}
|
||||
/*
|
||||
* Check for the existence of a marker file that indicates the plugin is in a garbage state from a failed attempt to remove
|
||||
* the plugin.
|
||||
*/
|
||||
final Path removing = plugin.resolve(".removing-" + info.getName());
|
||||
if (Files.exists(removing)) {
|
||||
final String message = String.format(
|
||||
Locale.ROOT,
|
||||
"found file [%s] from a failed attempt to remove the plugin [%s]; execute [elasticsearch-plugin remove %2$s]",
|
||||
removing,
|
||||
info.getName());
|
||||
throw new IllegalStateException(message);
|
||||
}
|
||||
|
||||
Set<URL> urls = new LinkedHashSet<>();
|
||||
try (DirectoryStream<Path> jarStream = Files.newDirectoryStream(plugin, "*.jar")) {
|
||||
|
|
|
@ -38,14 +38,4 @@ public interface ExecutableScript {
|
|||
* Executes the script.
|
||||
*/
|
||||
Object run();
|
||||
|
||||
/**
|
||||
* Unwraps a possible script value. For example, when passing vars and
|
||||
* expecting the returned value to be part of the vars. Javascript and
|
||||
* Python need this but other scripting engines just return the values
|
||||
* passed in.
|
||||
*/
|
||||
default Object unwrap(Object value) {
|
||||
return value;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -469,13 +469,6 @@ public class ScriptService extends AbstractComponent implements Closeable, Clust
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Compiles (or retrieves from cache) and executes the provided script
|
||||
*/
|
||||
public ExecutableScript executable(Script script, ScriptContext scriptContext) {
|
||||
return executable(compile(script, scriptContext), script.getParams());
|
||||
}
|
||||
|
||||
/**
|
||||
* Executes a previously compiled script provided as an argument
|
||||
*/
|
||||
|
|
|
@ -34,7 +34,7 @@ import org.elasticsearch.common.io.stream.Writeable;
|
|||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.common.xcontent.ObjectParser;
|
||||
import org.elasticsearch.common.xcontent.ObjectParser.ValueType;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
import org.elasticsearch.common.xcontent.ToXContentObject;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
|
@ -52,7 +52,7 @@ import java.util.Objects;
|
|||
* {@link StoredScriptSource} represents user-defined parameters for a script
|
||||
* saved in the {@link ClusterState}.
|
||||
*/
|
||||
public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> implements Writeable, ToXContent {
|
||||
public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> implements Writeable, ToXContentObject {
|
||||
|
||||
/**
|
||||
* Standard {@link ParseField} for outer level of stored script source.
|
||||
|
@ -123,10 +123,6 @@ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> imp
|
|||
* Appends the user-defined compiler options with the internal compiler options.
|
||||
*/
|
||||
private void setOptions(Map<String, String> options) {
|
||||
if (options.containsKey(Script.CONTENT_TYPE_OPTION)) {
|
||||
throw new IllegalArgumentException(Script.CONTENT_TYPE_OPTION + " cannot be user-specified");
|
||||
}
|
||||
|
||||
this.options.putAll(options);
|
||||
}
|
||||
|
||||
|
@ -266,8 +262,7 @@ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> imp
|
|||
//this is really for search templates, that need to be converted to json format
|
||||
try (XContentBuilder builder = XContentFactory.jsonBuilder()) {
|
||||
builder.copyCurrentStructure(parser);
|
||||
return new StoredScriptSource(lang, builder.string(),
|
||||
Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()));
|
||||
return new StoredScriptSource(lang, builder.string(), Collections.emptyMap());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -283,8 +278,7 @@ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> imp
|
|||
token = parser.nextToken();
|
||||
|
||||
if (token == Token.VALUE_STRING) {
|
||||
return new StoredScriptSource(lang, parser.text(),
|
||||
Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()));
|
||||
return new StoredScriptSource(lang, parser.text(), Collections.emptyMap());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -297,8 +291,7 @@ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> imp
|
|||
builder.copyCurrentStructure(parser);
|
||||
}
|
||||
|
||||
return new StoredScriptSource(lang, builder.string(),
|
||||
Collections.singletonMap(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()));
|
||||
return new StoredScriptSource(lang, builder.string(), Collections.emptyMap());
|
||||
}
|
||||
}
|
||||
} catch (IOException ioe) {
|
||||
|
@ -433,11 +426,6 @@ public class StoredScriptSource extends AbstractDiffable<StoredScriptSource> imp
|
|||
return builder;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean isFragment() {
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return The language used for compiling this script.
|
||||
*/
|
||||
|
|
|
@ -28,6 +28,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
|
|||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.index.query.QueryParseContext;
|
||||
import org.elasticsearch.index.query.QueryShardException;
|
||||
import org.elasticsearch.script.CompiledScript;
|
||||
import org.elasticsearch.script.ExecutableScript;
|
||||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.script.ScriptContext;
|
||||
|
@ -92,7 +93,8 @@ public class ScriptHeuristic extends SignificanceHeuristic {
|
|||
|
||||
@Override
|
||||
public SignificanceHeuristic rewrite(InternalAggregation.ReduceContext context) {
|
||||
return new ExecutableScriptHeuristic(script, context.scriptService().executable(script, ScriptContext.Standard.AGGS));
|
||||
CompiledScript compiledScript = context.scriptService().compile(script, ScriptContext.Standard.AGGS);
|
||||
return new ExecutableScriptHeuristic(script, context.scriptService().executable(compiledScript, script.getParams()));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -50,7 +50,7 @@ public final class ScriptFieldsFetchSubPhase implements FetchSubPhase {
|
|||
|
||||
final Object value;
|
||||
try {
|
||||
value = leafScript.unwrap(leafScript.run());
|
||||
value = leafScript.run();
|
||||
} catch (RuntimeException e) {
|
||||
if (scriptField.ignoreException()) {
|
||||
continue;
|
||||
|
|
|
@@ -0,0 +1,125 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.bulk;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
import org.elasticsearch.action.delete.DeleteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.transport.TransportService;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;

import static java.util.Collections.emptySet;
import static java.util.Collections.singleton;
import static org.mockito.Mockito.mock;

public class TransportBulkActionIndicesThatCannotBeCreatedTests extends ESTestCase {
    public void testNonExceptional() {
        BulkRequest bulkRequest = new BulkRequest();
        bulkRequest.add(new IndexRequest(randomAlphaOfLength(5)));
        bulkRequest.add(new IndexRequest(randomAlphaOfLength(5)));
        bulkRequest.add(new DeleteRequest(randomAlphaOfLength(5)));
        bulkRequest.add(new UpdateRequest(randomAlphaOfLength(5), randomAlphaOfLength(5), randomAlphaOfLength(5)));
        // Test emulating auto_create_index=false
        indicesThatCannotBeCreatedTestCase(emptySet(), bulkRequest, null);
        // Test emulating auto_create_index=true
        indicesThatCannotBeCreatedTestCase(emptySet(), bulkRequest, index -> true);
        // Test emulating all indices already created
        indicesThatCannotBeCreatedTestCase(emptySet(), bulkRequest, index -> false);
        // Test emulating auto_create_index=true with some indices already created.
        indicesThatCannotBeCreatedTestCase(emptySet(), bulkRequest, index -> randomBoolean());
    }

    public void testAllFail() {
        BulkRequest bulkRequest = new BulkRequest();
        bulkRequest.add(new IndexRequest("no"));
        bulkRequest.add(new IndexRequest("can't"));
        bulkRequest.add(new DeleteRequest("do"));
        bulkRequest.add(new UpdateRequest("nothin", randomAlphaOfLength(5), randomAlphaOfLength(5)));
        indicesThatCannotBeCreatedTestCase(new HashSet<>(Arrays.asList("no", "can't", "do", "nothin")), bulkRequest, index -> {
            throw new IndexNotFoundException("Can't make it because I say so");
        });
    }

    public void testSomeFail() {
        BulkRequest bulkRequest = new BulkRequest();
        bulkRequest.add(new IndexRequest("ok"));
        bulkRequest.add(new IndexRequest("bad"));
        // Emulate auto_create_index=-bad,+*
        indicesThatCannotBeCreatedTestCase(singleton("bad"), bulkRequest, index -> {
            if (index.equals("bad")) {
                throw new IndexNotFoundException("Can't make it because I say so");
            }
            return true;
        });
        // Emulate auto_create_index=false but the "ok" index already exists
        indicesThatCannotBeCreatedTestCase(singleton("bad"), bulkRequest, index -> {
            if (index.equals("bad")) {
                throw new IndexNotFoundException("Can't make it because I say so");
            }
            return false;
        });
    }

    private void indicesThatCannotBeCreatedTestCase(Set<String> expected,
            BulkRequest bulkRequest, Function<String, Boolean> shouldAutoCreate) {
        TransportBulkAction action = new TransportBulkAction(Settings.EMPTY, null, mock(TransportService.class), mock(ClusterService.class),
                null, null, null, mock(ActionFilters.class), null, null) {
            @Override
            void executeBulk(Task task, BulkRequest bulkRequest, long startTimeNanos, ActionListener<BulkResponse> listener,
                    AtomicArray<BulkItemResponse> responses, Map<String, IndexNotFoundException> indicesThatCannotBeCreated) {
                assertEquals(expected, indicesThatCannotBeCreated.keySet());
            }

            @Override
            boolean needToCheck() {
                return null != shouldAutoCreate; // Use "null" to mean "no indices can be created so don't bother checking"
            }

            @Override
            boolean shouldAutoCreate(String index, ClusterState state) {
                return shouldAutoCreate.apply(index);
            }

            @Override
            void createIndex(String index, TimeValue timeout, ActionListener<CreateIndexResponse> listener) {
                // If we try to create an index just immediately assume it worked
                listener.onResponse(new CreateIndexResponse(true, true) {});
            }
        };
        action.doExecute(null, bulkRequest, null);
    }
}
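
Editor's note: the new test above drives TransportBulkAction entirely through its overridable hooks (needToCheck, shouldAutoCreate, createIndex, executeBulk), so no real cluster is needed. For orientation, a rough sketch of the collection behaviour the test asserts on; the variable names and the synchronous control flow below are illustrative assumptions, not the exact production code:

    // Sketch only: remember which indices cannot be auto-created instead of failing the whole bulk request.
    Map<String, IndexNotFoundException> indicesThatCannotBeCreated = new HashMap<>();
    Set<String> autoCreateIndices = new HashSet<>();
    for (String index : indices) {
        try {
            if (shouldAutoCreate(index, state)) {
                autoCreateIndices.add(index);           // will be created before the bulk runs
            }
        } catch (IndexNotFoundException e) {
            indicesThatCannotBeCreated.put(index, e);   // recorded per index, surfaced later as item-level failures
        }
    }
    // ... create the indices in autoCreateIndices, then:
    // executeBulk(task, bulkRequest, startTimeNanos, listener, responses, indicesThatCannotBeCreated);
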
@@ -28,15 +28,13 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateApplier;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.ingest.IngestService;
import org.elasticsearch.ingest.PipelineExecutionService;
import org.elasticsearch.tasks.Task;
@@ -51,10 +49,10 @@ import org.mockito.MockitoAnnotations;

import java.util.Collections;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Supplier;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.sameInstance;
@@ -114,8 +112,8 @@ public class TransportBulkActionIngestTests extends ESTestCase {
            return false;
        }
        @Override
        void executeBulk(Task task, final BulkRequest bulkRequest, final long startTimeNanos,
                final ActionListener<BulkResponse> listener, final AtomicArray<BulkItemResponse> responses) {
        void executeBulk(Task task, final BulkRequest bulkRequest, final long startTimeNanos, final ActionListener<BulkResponse> listener,
                final AtomicArray<BulkItemResponse> responses, Map<String, IndexNotFoundException> indicesThatCannotBeCreated) {
            isExecuted = true;
        }
    }

@@ -36,6 +36,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.transport.CapturingTransport;

@@ -49,6 +50,7 @@ import org.junit.BeforeClass;

import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.LongSupplier;
@@ -125,9 +127,10 @@ public class TransportBulkActionTookTests extends ESTestCase {
                        BulkRequest bulkRequest,
                        long startTimeNanos,
                        ActionListener<BulkResponse> listener,
                        AtomicArray<BulkItemResponse> responses) {
                        AtomicArray<BulkItemResponse> responses,
                        Map<String, IndexNotFoundException> indicesThatCannotBeCreated) {
                    expected.set(1000000);
                    super.executeBulk(task, bulkRequest, startTimeNanos, listener, responses);
                    super.executeBulk(task, bulkRequest, startTimeNanos, listener, responses, indicesThatCannotBeCreated);
                }
            };
        } else {

@@ -149,10 +152,11 @@ public class TransportBulkActionTookTests extends ESTestCase {
                        BulkRequest bulkRequest,
                        long startTimeNanos,
                        ActionListener<BulkResponse> listener,
                        AtomicArray<BulkItemResponse> responses) {
                        AtomicArray<BulkItemResponse> responses,
                        Map<String, IndexNotFoundException> indicesThatCannotBeCreated) {
                    long elapsed = spinForAtLeastOneMillisecond();
                    expected.set(elapsed);
                    super.executeBulk(task, bulkRequest, startTimeNanos, listener, responses);
                    super.executeBulk(task, bulkRequest, startTimeNanos, listener, responses, indicesThatCannotBeCreated);
                }
            };
        }

@ -56,10 +56,10 @@ import org.apache.lucene.index.TieredMergePolicy;
|
|||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.search.MatchAllDocsQuery;
|
||||
import org.apache.lucene.search.ReferenceManager;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.apache.lucene.search.Sort;
|
||||
import org.apache.lucene.search.SortedSetSortField;
|
||||
import org.apache.lucene.search.TermQuery;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.apache.lucene.search.TotalHitCountCollector;
|
||||
import org.apache.lucene.store.AlreadyClosedException;
|
||||
import org.apache.lucene.store.Directory;
|
||||
|
@ -114,6 +114,7 @@ import org.elasticsearch.index.mapper.ParsedDocument;
|
|||
import org.elasticsearch.index.mapper.RootObjectMapper;
|
||||
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
|
||||
import org.elasticsearch.index.mapper.SourceFieldMapper;
|
||||
import org.elasticsearch.index.mapper.SourceToParse;
|
||||
import org.elasticsearch.index.mapper.Uid;
|
||||
import org.elasticsearch.index.mapper.UidFieldMapper;
|
||||
import org.elasticsearch.index.seqno.SequenceNumbers;
|
||||
|
@ -191,7 +192,7 @@ import static org.hamcrest.Matchers.nullValue;
|
|||
|
||||
public class InternalEngineTests extends ESTestCase {
|
||||
|
||||
protected final ShardId shardId = new ShardId(new Index("index", "_na_"), 1);
|
||||
protected final ShardId shardId = new ShardId(new Index("index", "_na_"), 0);
|
||||
private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY);
|
||||
|
||||
protected ThreadPool threadPool;
|
||||
|
@ -262,7 +263,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
config.getStore(), config.getDeletionPolicy(), config.getMergePolicy(), analyzer, config.getSimilarity(),
|
||||
new CodecService(null, logger), config.getEventListener(), config.getTranslogRecoveryPerformer(), config.getQueryCache(),
|
||||
config.getQueryCachingPolicy(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getRefreshListeners(),
|
||||
config.getMaxUnsafeAutoIdTimestamp(), config.getIndexSort());
|
||||
config.getIndexSort());
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -371,7 +372,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
@Nullable IndexWriterFactory indexWriterFactory,
|
||||
@Nullable Supplier<SequenceNumbersService> sequenceNumbersServiceSupplier,
|
||||
@Nullable Sort indexSort) throws IOException {
|
||||
EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null, indexSort);
|
||||
EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, null, indexSort);
|
||||
InternalEngine internalEngine = createInternalEngine(indexWriterFactory, sequenceNumbersServiceSupplier, config);
|
||||
if (config.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) {
|
||||
internalEngine.recoverFromTranslog();
|
||||
|
@ -404,25 +405,22 @@ public class InternalEngineTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy,
|
||||
long maxUnsafeAutoIdTimestamp, ReferenceManager.RefreshListener refreshListener) {
|
||||
return config(indexSettings, store, translogPath, mergePolicy, createSnapshotDeletionPolicy(),
|
||||
maxUnsafeAutoIdTimestamp, refreshListener, null);
|
||||
}
|
||||
|
||||
public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy,
|
||||
long maxUnsafeAutoIdTimestamp, ReferenceManager.RefreshListener refreshListener, Sort indexSort) {
|
||||
return config(indexSettings, store, translogPath, mergePolicy, createSnapshotDeletionPolicy(),
|
||||
maxUnsafeAutoIdTimestamp, refreshListener, indexSort);
|
||||
}
|
||||
|
||||
public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy,
|
||||
SnapshotDeletionPolicy deletionPolicy, long maxUnsafeAutoIdTimestamp,
|
||||
ReferenceManager.RefreshListener refreshListener) {
|
||||
return config(indexSettings, store, translogPath, mergePolicy, deletionPolicy, maxUnsafeAutoIdTimestamp, refreshListener, null);
|
||||
return config(indexSettings, store, translogPath, mergePolicy, createSnapshotDeletionPolicy(), refreshListener, null);
|
||||
}
|
||||
|
||||
public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy,
|
||||
SnapshotDeletionPolicy deletionPolicy, long maxUnsafeAutoIdTimestamp,
|
||||
ReferenceManager.RefreshListener refreshListener, Sort indexSort) {
|
||||
return config(indexSettings, store, translogPath, mergePolicy, createSnapshotDeletionPolicy(), refreshListener, indexSort);
|
||||
}
|
||||
|
||||
public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy,
|
||||
SnapshotDeletionPolicy deletionPolicy, ReferenceManager.RefreshListener refreshListener) {
|
||||
return config(indexSettings, store, translogPath, mergePolicy, deletionPolicy, refreshListener, null);
|
||||
}
|
||||
|
||||
public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy,
|
||||
SnapshotDeletionPolicy deletionPolicy,
|
||||
ReferenceManager.RefreshListener refreshListener, Sort indexSort) {
|
||||
IndexWriterConfig iwc = newIndexWriterConfig();
|
||||
TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE);
|
||||
|
@ -445,8 +443,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
EngineConfig config = new EngineConfig(openMode, shardId, threadPool, indexSettings, null, store, deletionPolicy,
|
||||
mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), listener,
|
||||
new TranslogHandler(xContentRegistry(), shardId.getIndexName(), logger), IndexSearcher.getDefaultQueryCache(),
|
||||
IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), refreshListener,
|
||||
maxUnsafeAutoIdTimestamp, indexSort);
|
||||
IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig, TimeValue.timeValueMinutes(5), refreshListener, indexSort);
|
||||
|
||||
return config;
|
||||
}
|
||||
|
@ -1170,8 +1167,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
|
||||
public void testSyncedFlush() throws IOException {
|
||||
try (Store store = createStore();
|
||||
Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(),
|
||||
new LogByteSizeMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {
|
||||
Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), new LogByteSizeMergePolicy(), null))) {
|
||||
final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
|
||||
ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null);
|
||||
engine.index(indexForDoc(doc));
|
||||
|
@ -1198,7 +1194,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
for (int i = 0; i < iters; i++) {
|
||||
try (Store store = createStore();
|
||||
InternalEngine engine = new InternalEngine(config(defaultSettings, store, createTempDir(),
|
||||
new LogDocMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {
|
||||
new LogDocMergePolicy(), null))) {
|
||||
final String syncId = randomUnicodeOfCodepointLengthBetween(10, 20);
|
||||
Engine.Index doc1 = indexForDoc(testParsedDocument("1", "test", null, testDocumentWithTextField(), B_1, null));
|
||||
engine.index(doc1);
|
||||
|
@ -1317,7 +1313,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
public void testForceMerge() throws IOException {
|
||||
try (Store store = createStore();
|
||||
Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(),
|
||||
new LogByteSizeMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) { // use log MP here we test some behavior in ESMP
|
||||
new LogByteSizeMergePolicy(), null))) { // use log MP here we test some behavior in ESMP
|
||||
int numDocs = randomIntBetween(10, 100);
|
||||
for (int i = 0; i < numDocs; i++) {
|
||||
ParsedDocument doc = testParsedDocument(Integer.toString(i), "test", null, testDocument(), B_1, null);
|
||||
|
@ -1961,7 +1957,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
@Override
|
||||
public void append(LogEvent event) {
|
||||
final String formattedMessage = event.getMessage().getFormattedMessage();
|
||||
if (event.getLevel() == Level.TRACE && event.getMarker().getName().contains("[index][1] ")) {
|
||||
if (event.getLevel() == Level.TRACE && event.getMarker().getName().contains("[index][0] ")) {
|
||||
if (event.getLoggerName().endsWith(".IW") &&
|
||||
formattedMessage.contains("IW: apply all deletes during flush")) {
|
||||
sawIndexWriterMessage = true;
|
||||
|
@ -2132,8 +2128,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
public void testConcurrentWritesAndCommits() throws Exception {
|
||||
try (Store store = createStore();
|
||||
InternalEngine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(),
|
||||
new SnapshotDeletionPolicy(NoDeletionPolicy.INSTANCE),
|
||||
IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {
|
||||
new SnapshotDeletionPolicy(NoDeletionPolicy.INSTANCE), null))) {
|
||||
|
||||
final int numIndexingThreads = scaledRandomIntBetween(3, 6);
|
||||
final int numDocsPerThread = randomIntBetween(500, 1000);
|
||||
|
@ -2274,7 +2269,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
|
||||
public void testEnableGcDeletes() throws Exception {
|
||||
try (Store store = createStore();
|
||||
Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {
|
||||
Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), newMergePolicy(), null))) {
|
||||
engine.config().setEnableGcDeletes(false);
|
||||
|
||||
// Add document
|
||||
|
@ -2341,7 +2336,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
|
||||
private Engine.Index replicaIndexForDoc(ParsedDocument doc, long version, long seqNo,
|
||||
boolean isRetry) {
|
||||
return new Engine.Index(newUid(doc), doc, seqNo, 1, version, VersionType.EXTERNAL,
|
||||
return new Engine.Index(newUid(doc), doc, seqNo, 1, version, VersionType.EXTERNAL,
|
||||
Engine.Operation.Origin.REPLICA, System.nanoTime(),
|
||||
IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, isRetry);
|
||||
}
|
||||
|
@ -2421,7 +2416,8 @@ public class InternalEngineTests extends ESTestCase {
|
|||
// expected
|
||||
}
|
||||
// now it should be OK.
|
||||
EngineConfig config = copy(config(defaultSettings, store, primaryTranslogDir, newMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null), EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG);
|
||||
EngineConfig config = copy(config(defaultSettings, store, primaryTranslogDir, newMergePolicy(), null),
|
||||
EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG);
|
||||
engine = new InternalEngine(config);
|
||||
}
|
||||
|
||||
|
@ -2736,7 +2732,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
config.getIndexSettings(), null, store, createSnapshotDeletionPolicy(), newMergePolicy(), config.getAnalyzer(),
|
||||
config.getSimilarity(), new CodecService(null, logger), config.getEventListener(), config.getTranslogRecoveryPerformer(),
|
||||
IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig,
|
||||
TimeValue.timeValueMinutes(5), config.getRefreshListeners(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null);
|
||||
TimeValue.timeValueMinutes(5), config.getRefreshListeners(), null);
|
||||
|
||||
try {
|
||||
InternalEngine internalEngine = new InternalEngine(brokenConfig);
|
||||
|
@ -2788,7 +2784,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
|
||||
public void testCurrentTranslogIDisCommitted() throws IOException {
|
||||
try (Store store = createStore()) {
|
||||
EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null);
|
||||
EngineConfig config = config(defaultSettings, store, createTempDir(), newMergePolicy(), null);
|
||||
|
||||
// create
|
||||
{
|
||||
|
@ -3284,47 +3280,36 @@ public class InternalEngineTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testEngineMaxTimestampIsInitialized() throws IOException {
|
||||
try (Store store = createStore();
|
||||
Engine engine = new InternalEngine(config(defaultSettings, store, createTempDir(), NoMergePolicy.INSTANCE,
|
||||
IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {
|
||||
assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
|
||||
|
||||
}
|
||||
|
||||
final long timestamp1 = Math.abs(randomLong());
|
||||
final long timestamp1 = Math.abs(randomNonNegativeLong());
|
||||
final Path storeDir = createTempDir();
|
||||
final Path translogDir = createTempDir();
|
||||
try (Store store = createStore(newFSDirectory(storeDir));
|
||||
Engine engine = new InternalEngine(
|
||||
config(defaultSettings, store, translogDir, NoMergePolicy.INSTANCE, timestamp1, null))) {
|
||||
assertEquals(timestamp1, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
|
||||
}
|
||||
final long timestamp2 = randomNonNegativeLong();
|
||||
final long timestamp3 = randomNonNegativeLong();
|
||||
final long maxTimestamp12 = Math.max(timestamp1, timestamp2);
|
||||
final long maxTimestamp123 = Math.max(maxTimestamp12, timestamp3);
|
||||
try (Store store = createStore(newFSDirectory(storeDir));
|
||||
Engine engine = new InternalEngine(
|
||||
copy(config(defaultSettings, store, translogDir, NoMergePolicy.INSTANCE, timestamp2, null),
|
||||
randomFrom(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG)))) {
|
||||
assertEquals(maxTimestamp12, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
|
||||
if (engine.config().getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) {
|
||||
// recover from translog and commit maxTimestamp12
|
||||
engine.recoverFromTranslog();
|
||||
// force flush as the were no ops performed
|
||||
engine.flush(true, false);
|
||||
}
|
||||
Engine engine = new InternalEngine(config(defaultSettings, store, translogDir, NoMergePolicy.INSTANCE, null))) {
|
||||
assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
|
||||
final ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(),
|
||||
new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
|
||||
engine.index(appendOnlyPrimary(doc, true, timestamp3));
|
||||
assertEquals(maxTimestamp123, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
|
||||
engine.index(appendOnlyPrimary(doc, true, timestamp1));
|
||||
assertEquals(timestamp1, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
|
||||
}
|
||||
try (Store store = createStore(newFSDirectory(storeDir));
|
||||
Engine engine = new InternalEngine(config(defaultSettings, store, translogDir, NoMergePolicy.INSTANCE, null))) {
|
||||
assertEquals(IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
|
||||
engine.recoverFromTranslog();
|
||||
assertEquals(timestamp1, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
|
||||
final ParsedDocument doc = testParsedDocument("1", "test", null, testDocumentWithTextField(),
|
||||
new BytesArray("{}".getBytes(Charset.defaultCharset())), null);
|
||||
engine.index(appendOnlyPrimary(doc, true, timestamp2));
|
||||
assertEquals(maxTimestamp12, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
|
||||
engine.flush();
|
||||
}
|
||||
try (Store store = createStore(newFSDirectory(storeDir));
|
||||
Engine engine = new InternalEngine(
|
||||
config(defaultSettings, store, translogDir, NoMergePolicy.INSTANCE, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null))) {
|
||||
copy(config(defaultSettings, store, translogDir, NoMergePolicy.INSTANCE, null),
|
||||
randomFrom(EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG, EngineConfig.OpenMode.OPEN_INDEX_CREATE_TRANSLOG)))) {
|
||||
assertEquals(maxTimestamp12, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
|
||||
engine.recoverFromTranslog();
|
||||
assertEquals(maxTimestamp123, engine.segmentsStats(false).getMaxUnsafeAutoIdTimestamp());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3394,8 +3379,7 @@ public class InternalEngineTests extends ESTestCase {
|
|||
CyclicBarrier join = new CyclicBarrier(2);
|
||||
CountDownLatch start = new CountDownLatch(1);
|
||||
AtomicInteger controller = new AtomicInteger(0);
|
||||
EngineConfig config = config(defaultSettings, store, translogPath, newMergePolicy(),
|
||||
IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, new ReferenceManager.RefreshListener() {
|
||||
EngineConfig config = config(defaultSettings, store, translogPath, newMergePolicy(), new ReferenceManager.RefreshListener() {
|
||||
@Override
|
||||
public void beforeRefresh() throws IOException {
|
||||
}
|
||||
|
@ -3853,4 +3837,79 @@ public class InternalEngineTests extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
public void testFillUpSequenceIdGapsOnRecovery() throws IOException {
|
||||
final int docs = randomIntBetween(1, 32);
|
||||
int numDocsOnReplica = 0;
|
||||
long maxSeqIDOnReplica = -1;
|
||||
long checkpointOnReplica;
|
||||
try {
|
||||
for (int i = 0; i < docs; i++) {
|
||||
final String docId = Integer.toString(i);
|
||||
final ParsedDocument doc =
|
||||
testParsedDocument(docId, "test", null, testDocumentWithTextField(), SOURCE, null);
|
||||
Engine.Index primaryResponse = indexForDoc(doc);
|
||||
Engine.IndexResult indexResult = engine.index(primaryResponse);
|
||||
if (randomBoolean()) {
|
||||
numDocsOnReplica++;
|
||||
maxSeqIDOnReplica = indexResult.getSeqNo();
|
||||
replicaEngine.index(replicaIndexForDoc(doc, 1, indexResult.getSeqNo(), false));
|
||||
}
|
||||
}
|
||||
checkpointOnReplica = replicaEngine.seqNoService().getLocalCheckpoint();
|
||||
} finally {
|
||||
IOUtils.close(replicaEngine);
|
||||
}
|
||||
|
||||
|
||||
boolean flushed = false;
|
||||
Engine recoveringEngine = null;
|
||||
try {
|
||||
assertEquals(docs-1, engine.seqNoService().getMaxSeqNo());
|
||||
assertEquals(docs-1, engine.seqNoService().getLocalCheckpoint());
|
||||
assertEquals(maxSeqIDOnReplica, replicaEngine.seqNoService().getMaxSeqNo());
|
||||
assertEquals(checkpointOnReplica, replicaEngine.seqNoService().getLocalCheckpoint());
|
||||
recoveringEngine = new InternalEngine(copy(replicaEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG));
|
||||
assertEquals(numDocsOnReplica, recoveringEngine.getTranslog().totalOperations());
|
||||
recoveringEngine.recoverFromTranslog();
|
||||
assertEquals(maxSeqIDOnReplica, recoveringEngine.seqNoService().getMaxSeqNo());
|
||||
assertEquals(checkpointOnReplica, recoveringEngine.seqNoService().getLocalCheckpoint());
|
||||
assertEquals((maxSeqIDOnReplica+1) - numDocsOnReplica, recoveringEngine.fillSequenceNumberHistory(2));
|
||||
|
||||
// now snapshot the tlog and ensure the primary term is updated
|
||||
Translog.Snapshot snapshot = recoveringEngine.getTranslog().newSnapshot();
|
||||
assertTrue((maxSeqIDOnReplica+1) - numDocsOnReplica <= snapshot.totalOperations());
|
||||
Translog.Operation operation;
|
||||
while((operation = snapshot.next()) != null) {
|
||||
if (operation.opType() == Translog.Operation.Type.NO_OP) {
|
||||
assertEquals(2, operation.primaryTerm());
|
||||
} else {
|
||||
assertEquals(1, operation.primaryTerm());
|
||||
}
|
||||
|
||||
}
|
||||
assertEquals(maxSeqIDOnReplica, recoveringEngine.seqNoService().getMaxSeqNo());
|
||||
assertEquals(maxSeqIDOnReplica, recoveringEngine.seqNoService().getLocalCheckpoint());
|
||||
if ((flushed = randomBoolean())) {
|
||||
recoveringEngine.flush(true, true);
|
||||
}
|
||||
} finally {
|
||||
IOUtils.close(recoveringEngine);
|
||||
}
|
||||
|
||||
// now do it again to make sure we preserve values etc.
|
||||
try {
|
||||
recoveringEngine = new InternalEngine(copy(replicaEngine.config(), EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG));
|
||||
if (flushed) {
|
||||
assertEquals(0, recoveringEngine.getTranslog().totalOperations());
|
||||
}
|
||||
recoveringEngine.recoverFromTranslog();
|
||||
assertEquals(maxSeqIDOnReplica, recoveringEngine.seqNoService().getMaxSeqNo());
|
||||
assertEquals(maxSeqIDOnReplica, recoveringEngine.seqNoService().getLocalCheckpoint());
|
||||
assertEquals(0, recoveringEngine.fillSequenceNumberHistory(3));
|
||||
assertEquals(maxSeqIDOnReplica, recoveringEngine.seqNoService().getMaxSeqNo());
|
||||
assertEquals(maxSeqIDOnReplica, recoveringEngine.seqNoService().getLocalCheckpoint());
|
||||
} finally {
|
||||
IOUtils.close(recoveringEngine);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.index.mapper;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.bulk.BulkItemResponse;
|
||||
import org.elasticsearch.action.bulk.BulkRequest;
|
||||
import org.elasticsearch.action.bulk.BulkResponse;
|
||||
import org.elasticsearch.action.bulk.TransportBulkAction;
|
||||
|
@ -114,22 +115,24 @@ public class DynamicMappingDisabledTests extends ESSingleNodeTestCase {
|
|||
request.source(Requests.INDEX_CONTENT_TYPE, "foo", 3);
|
||||
BulkRequest bulkRequest = new BulkRequest();
|
||||
bulkRequest.add(request);
|
||||
final AtomicBoolean onFailureCalled = new AtomicBoolean();
|
||||
final AtomicBoolean gotResponse = new AtomicBoolean();
|
||||
|
||||
transportBulkAction.execute(bulkRequest, new ActionListener<BulkResponse>() {
|
||||
@Override
|
||||
public void onResponse(BulkResponse bulkResponse) {
|
||||
fail("onResponse shouldn't be called");
|
||||
BulkItemResponse itemResponse = bulkResponse.getItems()[0];
|
||||
assertTrue(itemResponse.isFailed());
|
||||
assertThat(itemResponse.getFailure().getCause(), instanceOf(IndexNotFoundException.class));
|
||||
assertEquals("no such index and [index.mapper.dynamic] is [false]", itemResponse.getFailure().getCause().getMessage());
|
||||
gotResponse.set(true);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onFailure(Exception e) {
|
||||
onFailureCalled.set(true);
|
||||
assertThat(e, instanceOf(IndexNotFoundException.class));
|
||||
assertEquals("no such index and [index.mapper.dynamic] is [false]", e.getMessage());
|
||||
fail("unexpected failure in bulk action, expected failed bulk item");
|
||||
}
|
||||
});
|
||||
|
||||
assertTrue(onFailureCalled.get());
|
||||
assertTrue(gotResponse.get());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -29,11 +29,11 @@ import org.elasticsearch.action.index.IndexRequest;
|
|||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
import org.elasticsearch.index.engine.EngineConfig;
|
||||
import org.elasticsearch.index.engine.EngineFactory;
|
||||
import org.elasticsearch.index.engine.InternalEngineTests;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.store.Store;
|
||||
import org.elasticsearch.index.translog.Translog;
|
||||
|
@ -289,9 +289,9 @@ public class RecoveryDuringReplicationTests extends ESIndexLevelReplicationTestC
|
|||
return new RecoveryTarget(indexShard, node, recoveryListener, l -> {
|
||||
}) {
|
||||
@Override
|
||||
public void prepareForTranslogOperations(int totalTranslogOps, long maxUnsafeAutoIdTimestamp) throws IOException {
|
||||
public void prepareForTranslogOperations(int totalTranslogOps) throws IOException {
|
||||
preparedForTranslog.set(true);
|
||||
super.prepareForTranslogOperations(totalTranslogOps, maxUnsafeAutoIdTimestamp);
|
||||
super.prepareForTranslogOperations(totalTranslogOps);
|
||||
}
|
||||
};
|
||||
});
|
||||
|
|
|
@ -38,6 +38,7 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequest;
|
|||
import org.elasticsearch.action.admin.indices.stats.CommonStats;
|
||||
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
|
||||
import org.elasticsearch.action.admin.indices.stats.ShardStats;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.action.support.PlainActionFuture;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MappingMetaData;
|
||||
|
@ -77,6 +78,7 @@ import org.elasticsearch.index.mapper.Mapping;
|
|||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.mapper.ParsedDocument;
|
||||
import org.elasticsearch.index.mapper.SeqNoFieldMapper;
|
||||
import org.elasticsearch.index.mapper.SourceToParse;
|
||||
import org.elasticsearch.index.mapper.Uid;
|
||||
import org.elasticsearch.index.mapper.UidFieldMapper;
|
||||
import org.elasticsearch.index.seqno.SequenceNumbersService;
|
||||
|
@ -896,6 +898,46 @@ public class IndexShardTests extends IndexShardTestCase {
|
|||
closeShards(newShard);
|
||||
}
|
||||
|
||||
/* This test just verifies that we fill up local checkpoint up to max seen seqID on primary recovery */
|
||||
public void testRecoverFromStoreWithNoOps() throws IOException {
|
||||
final IndexShard shard = newStartedShard(true);
|
||||
indexDoc(shard, "test", "0");
|
||||
Engine.Index test = indexDoc(shard, "test", "1");
|
||||
// start a replica shard and index the second doc
|
||||
final IndexShard otherShard = newStartedShard(false);
|
||||
test = otherShard.prepareIndexOnReplica(
|
||||
SourceToParse.source(shard.shardId().getIndexName(), test.type(), test.id(), test.source(),
|
||||
XContentType.JSON),
|
||||
1, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
|
||||
otherShard.index(test);
|
||||
|
||||
final ShardRouting primaryShardRouting = shard.routingEntry();
|
||||
IndexShard newShard = reinitShard(otherShard, ShardRoutingHelper.initWithSameId(primaryShardRouting,
|
||||
RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE));
|
||||
DiscoveryNode localNode = new DiscoveryNode("foo", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT);
|
||||
newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null));
|
||||
assertTrue(newShard.recoverFromStore());
|
||||
assertEquals(1, newShard.recoveryState().getTranslog().recoveredOperations());
|
||||
assertEquals(1, newShard.recoveryState().getTranslog().totalOperations());
|
||||
assertEquals(1, newShard.recoveryState().getTranslog().totalOperationsOnStart());
|
||||
assertEquals(100.0f, newShard.recoveryState().getTranslog().recoveredPercent(), 0.01f);
|
||||
Translog.Snapshot snapshot = newShard.getTranslog().newSnapshot();
|
||||
Translog.Operation operation;
|
||||
int numNoops = 0;
|
||||
while((operation = snapshot.next()) != null) {
|
||||
if (operation.opType() == Translog.Operation.Type.NO_OP) {
|
||||
numNoops++;
|
||||
assertEquals(1, operation.primaryTerm());
|
||||
assertEquals(0, operation.seqNo());
|
||||
}
|
||||
}
|
||||
assertEquals(1, numNoops);
|
||||
newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted());
|
||||
assertDocCount(newShard, 1);
|
||||
assertDocCount(shard, 2);
|
||||
closeShards(newShard, shard);
|
||||
}
|
||||
|
||||
public void testRecoverFromCleanStore() throws IOException {
|
||||
final IndexShard shard = newStartedShard(true);
|
||||
indexDoc(shard, "test", "0");
|
||||
|
@ -1281,8 +1323,8 @@ public class IndexShardTests extends IndexShardTestCase {
|
|||
new RecoveryTarget(shard, discoveryNode, recoveryListener, aLong -> {
|
||||
}) {
|
||||
@Override
|
||||
public void prepareForTranslogOperations(int totalTranslogOps, long maxUnsafeAutoIdTimestamp) throws IOException {
|
||||
super.prepareForTranslogOperations(totalTranslogOps, maxUnsafeAutoIdTimestamp);
|
||||
public void prepareForTranslogOperations(int totalTranslogOps) throws IOException {
|
||||
super.prepareForTranslogOperations(totalTranslogOps);
|
||||
// Shard is still inactive since we haven't started recovering yet
|
||||
assertFalse(replica.isActive());
|
||||
|
||||
|
|
|
@ -29,7 +29,6 @@ import org.apache.lucene.index.Term;
|
|||
import org.apache.lucene.search.IndexSearcher;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.action.index.IndexRequest;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
|
@ -123,7 +122,7 @@ public class RefreshListenersTests extends ESTestCase {
|
|||
store, new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()), newMergePolicy(), iwc.getAnalyzer(),
|
||||
iwc.getSimilarity(), new CodecService(null, logger), eventListener, translogHandler,
|
||||
IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), translogConfig,
|
||||
TimeValue.timeValueMinutes(5), listeners, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, null);
|
||||
TimeValue.timeValueMinutes(5), listeners, null);
|
||||
engine = new InternalEngine(config);
|
||||
listeners.setTranslog(engine.getTranslog());
|
||||
}
|
||||
|
|
|
@ -44,7 +44,7 @@ public class PeerRecoveryTargetServiceTests extends IndexShardTestCase {
|
|||
long seqNo = 0;
|
||||
for (int i = 0; i < docs; i++) {
|
||||
Engine.Index indexOp = replica.prepareIndexOnReplica(
|
||||
SourceToParse.source(SourceToParse.Origin.REPLICA, index, "type", "doc_" + i, new BytesArray("{}"), XContentType.JSON),
|
||||
SourceToParse.source(index, "type", "doc_" + i, new BytesArray("{}"), XContentType.JSON),
|
||||
seqNo++, 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
|
||||
replica.index(indexOp);
|
||||
if (rarely()) {
|
||||
|
|
|
@ -393,7 +393,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
|
|||
}
|
||||
|
||||
@Override
|
||||
void prepareTargetForTranslog(final int totalTranslogOps, final long maxUnsafeAutoIdTimestamp) throws IOException {
|
||||
void prepareTargetForTranslog(final int totalTranslogOps) throws IOException {
|
||||
prepareTargetForTranslogCalled.set(true);
|
||||
}
|
||||
|
||||
|
@ -483,7 +483,7 @@ public class RecoverySourceHandlerTests extends ESTestCase {
|
|||
}
|
||||
|
||||
@Override
|
||||
void prepareTargetForTranslog(final int totalTranslogOps, final long maxUnsafeAutoIdTimestamp) throws IOException {
|
||||
void prepareTargetForTranslog(final int totalTranslogOps) throws IOException {
|
||||
prepareTargetForTranslogCalled.set(true);
|
||||
}
|
||||
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
package org.elasticsearch.plugins;
|
||||
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.index.IndexModule;
|
||||
|
@ -30,6 +31,7 @@ import java.nio.file.Files;
|
|||
import java.nio.file.Path;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.hasToString;
|
||||
|
@ -121,4 +123,32 @@ public class PluginsServiceTests extends ESTestCase {
|
|||
assertThat(e, hasToString(containsString(expected)));
|
||||
}
|
||||
|
||||
public void testStartupWithRemovingMarker() throws IOException {
|
||||
final Path home = createTempDir();
|
||||
final Settings settings =
|
||||
Settings.builder()
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), home)
|
||||
.build();
|
||||
final Path fake = home.resolve("plugins").resolve("fake");
|
||||
Files.createDirectories(fake);
|
||||
Files.createFile(fake.resolve("plugin.jar"));
|
||||
final Path removing = fake.resolve(".removing-fake");
|
||||
Files.createFile(fake.resolve(".removing-fake"));
|
||||
PluginTestUtil.writeProperties(
|
||||
fake,
|
||||
"description", "fake",
|
||||
"name", "fake",
|
||||
"version", "1.0.0",
|
||||
"elasticsearch.version", Version.CURRENT.toString(),
|
||||
"java.version", System.getProperty("java.specification.version"),
|
||||
"classname", "Fake",
|
||||
"has.native.controller", "false");
|
||||
final IllegalStateException e = expectThrows(IllegalStateException.class, () -> newPluginsService(settings));
|
||||
final String expected = String.format(
|
||||
Locale.ROOT,
|
||||
"found file [%s] from a failed attempt to remove the plugin [fake]; execute [elasticsearch-plugin remove fake]",
|
||||
removing);
|
||||
assertThat(e, hasToString(containsString(expected)));
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
@@ -52,8 +52,9 @@ public class NativeScriptTests extends ESTestCase {
        List<Setting<?>> scriptSettings = scriptModule.getSettings();
        scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED);

        ExecutableScript executable = scriptModule.getScriptService().executable(
            new Script(ScriptType.INLINE, NativeScriptEngineService.NAME, "my", Collections.emptyMap()), ScriptContext.Standard.SEARCH);
        Script script = new Script(ScriptType.INLINE, NativeScriptEngineService.NAME, "my", Collections.emptyMap());
        CompiledScript compiledScript = scriptModule.getScriptService().compile(script, ScriptContext.Standard.SEARCH);
        ExecutableScript executable = scriptModule.getScriptService().executable(compiledScript, script.getParams());
        assertThat(executable.run().toString(), equalTo("test"));
    }

@@ -346,7 +346,9 @@

    public void testExecutableCountedInCompilationStats() throws IOException {
        buildScriptService(Settings.EMPTY);
        scriptService.executable(new Script(ScriptType.INLINE, "test", "1+1", Collections.emptyMap()), randomFrom(scriptContexts));
        Script script = new Script(ScriptType.INLINE, "test", "1+1", Collections.emptyMap());
        CompiledScript compiledScript = scriptService.compile(script, randomFrom(scriptContexts));
        scriptService.executable(compiledScript, script.getParams());
        assertEquals(1L, scriptService.stats().getCompilations());
    }

@@ -371,8 +373,9 @@
        builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING.getKey(), 1);
        builder.put("script.inline", "true");
        buildScriptService(builder.build());
        scriptService.executable(new Script(ScriptType.INLINE, "test", "1+1", Collections.emptyMap()), randomFrom(scriptContexts));
        scriptService.executable(new Script(ScriptType.INLINE, "test", "1+1", Collections.emptyMap()), randomFrom(scriptContexts));
        Script script = new Script(ScriptType.INLINE, "test", "1+1", Collections.emptyMap());
        scriptService.compile(script, randomFrom(scriptContexts));
        scriptService.compile(script, randomFrom(scriptContexts));
        assertEquals(1L, scriptService.stats().getCompilations());
    }

@@ -394,8 +397,8 @@
        builder.put(ScriptService.SCRIPT_CACHE_SIZE_SETTING.getKey(), 1);
        builder.put("script.inline", "true");
        buildScriptService(builder.build());
        scriptService.executable(new Script(ScriptType.INLINE, "test", "1+1", Collections.emptyMap()), randomFrom(scriptContexts));
        scriptService.executable(new Script(ScriptType.INLINE, "test", "2+2", Collections.emptyMap()), randomFrom(scriptContexts));
        scriptService.compile(new Script(ScriptType.INLINE, "test", "1+1", Collections.emptyMap()), randomFrom(scriptContexts));
        scriptService.compile(new Script(ScriptType.INLINE, "test", "2+2", Collections.emptyMap()), randomFrom(scriptContexts));
        assertEquals(2L, scriptService.stats().getCompilations());
        assertEquals(1L, scriptService.stats().getCacheEvictions());
    }
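
Editor's note: the reworked script service tests above count compilations through the explicit compile call. The expectations rest on the compile cache being keyed by the script itself: recompiling an identical inline script is served from the cache, while a second, different script in a cache of size 1 forces an eviction. A small illustrative sketch of that reasoning, reusing the fixture names from the tests above (the chosen ScriptContext here is an arbitrary example):

    // Sketch only: cache behaviour the assertions above rely on.
    Script same = new Script(ScriptType.INLINE, "test", "1+1", Collections.emptyMap());
    scriptService.compile(same, ScriptContext.Standard.SEARCH);
    scriptService.compile(same, ScriptContext.Standard.SEARCH);   // identical source: cache hit, still one compilation
    assertEquals(1L, scriptService.stats().getCompilations());

    scriptService.compile(new Script(ScriptType.INLINE, "test", "2+2", Collections.emptyMap()), ScriptContext.Standard.SEARCH);
    assertEquals(2L, scriptService.stats().getCompilations());    // distinct source compiles again...
    assertEquals(1L, scriptService.stats().getCacheEvictions());  // ...and evicts the first entry when the cache size is 1
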
@ -0,0 +1,68 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.script;
|
||||
|
||||
import org.elasticsearch.common.io.stream.Writeable.Reader;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.test.AbstractSerializingTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
public class StoredScriptSourceTests extends AbstractSerializingTestCase<StoredScriptSource> {
|
||||
|
||||
@Override
|
||||
protected StoredScriptSource createTestInstance() {
|
||||
String lang = randomAlphaOfLengthBetween(1, 20);
|
||||
XContentType xContentType = randomFrom(XContentType.JSON, XContentType.YAML);
|
||||
try {
|
||||
XContentBuilder template = XContentBuilder.builder(xContentType.xContent());
|
||||
template.startObject();
|
||||
template.startObject("query");
|
||||
template.startObject("match");
|
||||
template.field("title", "{{query_string}}");
|
||||
template.endObject();
|
||||
template.endObject();
|
||||
template.endObject();
|
||||
Map<String, String> options = new HashMap<>();
|
||||
if (randomBoolean()) {
|
||||
options.put(Script.CONTENT_TYPE_OPTION, xContentType.mediaType());
|
||||
}
|
||||
return StoredScriptSource.parse(lang, template.bytes(), xContentType);
|
||||
} catch (IOException e) {
|
||||
throw new AssertionError("Failed to create test instance", e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
protected StoredScriptSource doParseInstance(XContentParser parser) throws IOException {
|
||||
return StoredScriptSource.fromXContent(parser);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Reader<StoredScriptSource> instanceReader() {
|
||||
return StoredScriptSource::new;
|
||||
}
|
||||
|
||||
|
||||
}
|
|
@ -20,7 +20,6 @@
|
|||
package org.elasticsearch.script;
|
||||
|
||||
import org.elasticsearch.ResourceNotFoundException;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
|
@ -198,8 +197,7 @@ public class StoredScriptTests extends AbstractSerializingTestCase<StoredScriptS
|
|||
builder.startObject().field("template", "code").endObject();
|
||||
|
||||
StoredScriptSource parsed = StoredScriptSource.parse("lang", builder.bytes(), XContentType.JSON);
|
||||
StoredScriptSource source = new StoredScriptSource("lang", "code",
|
||||
Collections.singletonMap(Script.CONTENT_TYPE_OPTION, builder.contentType().mediaType()));
|
||||
StoredScriptSource source = new StoredScriptSource("lang", "code", Collections.emptyMap());
|
||||
|
||||
assertThat(parsed, equalTo(source));
|
||||
}
|
||||
|
@ -214,8 +212,7 @@ public class StoredScriptTests extends AbstractSerializingTestCase<StoredScriptS
|
|||
}
|
||||
|
||||
StoredScriptSource parsed = StoredScriptSource.parse("lang", builder.bytes(), XContentType.JSON);
|
||||
StoredScriptSource source = new StoredScriptSource("lang", code,
|
||||
Collections.singletonMap(Script.CONTENT_TYPE_OPTION, builder.contentType().mediaType()));
|
||||
StoredScriptSource source = new StoredScriptSource("lang", code, Collections.emptyMap());
|
||||
|
||||
assertThat(parsed, equalTo(source));
|
||||
}
|
||||
|
@ -230,8 +227,7 @@ public class StoredScriptTests extends AbstractSerializingTestCase<StoredScriptS
|
|||
}
|
||||
|
||||
StoredScriptSource parsed = StoredScriptSource.parse("lang", builder.bytes(), XContentType.JSON);
|
||||
StoredScriptSource source = new StoredScriptSource("lang", code,
|
||||
Collections.singletonMap(Script.CONTENT_TYPE_OPTION, builder.contentType().mediaType()));
|
||||
StoredScriptSource source = new StoredScriptSource("lang", code, Collections.emptyMap());
|
||||
|
||||
assertThat(parsed, equalTo(source));
|
||||
}
|
||||
|
@ -246,8 +242,7 @@ public class StoredScriptTests extends AbstractSerializingTestCase<StoredScriptS
|
|||
}
|
||||
|
||||
StoredScriptSource parsed = StoredScriptSource.parse("lang", builder.bytes(), XContentType.JSON);
|
||||
StoredScriptSource source = new StoredScriptSource("lang", code,
|
||||
Collections.singletonMap(Script.CONTENT_TYPE_OPTION, builder.contentType().mediaType()));
|
||||
StoredScriptSource source = new StoredScriptSource("lang", code, Collections.emptyMap());
|
||||
|
||||
assertThat(parsed, equalTo(source));
|
||||
}
|
||||
|
@ -328,16 +323,6 @@ public class StoredScriptTests extends AbstractSerializingTestCase<StoredScriptS
|
|||
StoredScriptSource.parse(null, builder.bytes(), XContentType.JSON));
|
||||
assertThat(iae.getMessage(), equalTo("illegal compiler options [{option=option}] specified"));
|
||||
}
|
||||
|
||||
// check for illegal use of content type option
|
||||
try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) {
|
||||
builder.startObject().field("script").startObject().field("lang", "lang").field("code", "code")
|
||||
.startObject("options").field("content_type", "option").endObject().endObject().endObject();
|
||||
|
||||
ParsingException pe = expectThrows(ParsingException.class, () ->
|
||||
StoredScriptSource.parse(null, builder.bytes(), XContentType.JSON));
|
||||
assertThat(pe.getRootCause().getMessage(), equalTo("content_type cannot be user-specified"));
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@@ -564,11 +564,6 @@ public class SumIT extends AbstractNumericTestCase {

        return new LeafSearchScript() {

            @Override
            public Object unwrap(Object value) {
                throw new UnsupportedOperationException();
            }

            @Override
            public void setNextVar(String name, Object value) {
                vars.put(name, value);

@@ -305,11 +305,6 @@ public class ValueCountIT extends ESIntegTestCase {

        return new LeafSearchScript() {

            @Override
            public Object unwrap(Object value) {
                throw new UnsupportedOperationException();
            }

            @Override
            public void setNextVar(String name, Object value) {
                vars.put(name, value);

@@ -58,11 +58,6 @@ public class ScriptValuesTests extends ESTestCase {
            return randomBoolean() ? values : Arrays.asList(values);
        }

        @Override
        public Object unwrap(Object value) {
            throw new UnsupportedOperationException();
        }

        @Override
        public void setScorer(Scorer scorer) {
        }

@ -211,6 +211,27 @@ public class TribeIT extends ESIntegTestCase {
|
|||
|
||||
private Releasable startTribeNode(Predicate<InternalTestCluster> filter, Settings settings) throws Exception {
|
||||
final String node = internalCluster().startNode(createTribeSettings(filter).put(settings).build());
|
||||
|
||||
// wait for node to be connected to all tribe clusters
|
||||
final Set<String> expectedNodes = Sets.newHashSet(internalCluster().getNodeNames());
|
||||
doWithAllClusters(filter, c -> {
|
||||
// Adds the tribe client node dedicated to this remote cluster
|
||||
for (String tribeNode : internalCluster().getNodeNames()) {
|
||||
expectedNodes.add(tribeNode + "/" + c.getClusterName());
|
||||
}
|
||||
// Adds the remote clusters nodes names
|
||||
Collections.addAll(expectedNodes, c.getNodeNames());
|
||||
});
|
||||
assertBusy(() -> {
|
||||
ClusterState state = client().admin().cluster().prepareState().setNodes(true).get().getState();
|
||||
Set<String> nodes = StreamSupport.stream(state.getNodes().spliterator(), false).map(DiscoveryNode::getName).collect(toSet());
|
||||
assertThat(nodes, containsInAnyOrder(expectedNodes.toArray()));
|
||||
});
|
||||
// wait for join to be fully applied on all nodes in the tribe clusters, see https://github.com/elastic/elasticsearch/issues/23695
|
||||
doWithAllClusters(filter, c -> {
|
||||
assertFalse(c.client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).get().isTimedOut());
|
||||
});
|
||||
|
||||
return () -> {
|
||||
try {
|
||||
while(internalCluster().getNodeNames().length > 0) {
|
||||
|
@ -256,9 +277,6 @@ public class TribeIT extends ESIntegTestCase {
|
|||
assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
|
||||
ensureGreen(cluster2.client());
|
||||
|
||||
// Wait for the tribe node to connect to the two remote clusters
|
||||
assertNodes(ALL);
|
||||
|
||||
// Wait for the tribe node to retrieve the indices into its cluster state
|
||||
assertIndicesExist(client(), "test1", "test2");
|
||||
|
||||
|
@ -294,9 +312,6 @@ public class TribeIT extends ESIntegTestCase {
|
|||
assertAcked(cluster2.client().admin().indices().prepareCreate("block_test2"));
|
||||
ensureGreen(cluster2.client());
|
||||
|
||||
// Wait for the tribe node to connect to the two remote clusters
|
||||
assertNodes(ALL);
|
||||
|
||||
// Wait for the tribe node to retrieve the indices into its cluster state
|
||||
assertIndicesExist(client(), "test1", "test2", "block_test1", "block_test2");
|
||||
|
||||
|
@ -328,9 +343,6 @@ public class TribeIT extends ESIntegTestCase {
|
|||
assertAcked(cluster2.client().admin().indices().prepareCreate("conflict"));
|
||||
ensureGreen(cluster2.client());
|
||||
|
||||
// Wait for the tribe node to connect to the two remote clusters
|
||||
assertNodes(ALL);
|
||||
|
||||
// Wait for the tribe node to retrieve the indices into its cluster state
|
||||
assertIndicesExist(client(), "test1", "test2");
|
||||
|
||||
|
@ -358,9 +370,6 @@ public class TribeIT extends ESIntegTestCase {
|
|||
assertAcked(cluster2.client().admin().indices().prepareCreate("shared"));
|
||||
ensureGreen(cluster2.client());
|
||||
|
||||
// Wait for the tribe node to connect to the two remote clusters
|
||||
assertNodes(ALL);
|
||||
|
||||
// Wait for the tribe node to retrieve the indices into its cluster state
|
||||
assertIndicesExist(client(), "test1", "test2", "shared");
|
||||
|
||||
|
@ -383,9 +392,6 @@ public class TribeIT extends ESIntegTestCase {
|
|||
assertAcked(cluster2.client().admin().indices().prepareCreate("test2"));
|
||||
ensureGreen(cluster2.client());
|
||||
|
||||
// Wait for the tribe node to connect to the two remote clusters
|
||||
assertNodes(ALL);
|
||||
|
||||
// Wait for the tribe node to retrieve the indices into its cluster state
|
||||
assertIndicesExist(client(), "test1", "test2");
|
||||
|
||||
|
@ -444,9 +450,6 @@ public class TribeIT extends ESIntegTestCase {
|
|||
assertTrue(cluster1.client().admin().indices().prepareClose("first").get().isAcknowledged());
|
||||
|
||||
try (Releasable tribeNode = startTribeNode()) {
|
||||
// Wait for the tribe node to connect to the two remote clusters
|
||||
assertNodes(ALL);
|
||||
|
||||
// The closed index is not part of the tribe node cluster state
|
||||
ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
|
||||
assertFalse(clusterState.getMetaData().hasIndex("first"));
|
||||
|
@ -481,7 +484,6 @@ public class TribeIT extends ESIntegTestCase {
|
|||
|
||||
for (Predicate<InternalTestCluster> predicate : predicates) {
|
||||
try (Releasable tribeNode = startTribeNode(predicate, Settings.EMPTY)) {
|
||||
assertNodes(predicate);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -492,7 +494,6 @@ public class TribeIT extends ESIntegTestCase {
|
|||
MergableCustomMetaData1 customMetaData1 = new MergableCustomMetaData1("a");
|
||||
MergableCustomMetaData1 customMetaData2 = new MergableCustomMetaData1("b");
|
||||
try (Releasable tribeNode = startTribeNode()) {
|
||||
assertNodes(ALL);
|
||||
putCustomMetaData(cluster1, customMetaData1);
|
||||
putCustomMetaData(cluster2, customMetaData2);
|
||||
assertCustomMetaDataUpdated(internalCluster(), customMetaData2);
|
||||
|
@ -510,7 +511,6 @@ public class TribeIT extends ESIntegTestCase {
|
|||
Collections.sort(customMetaDatas, (cm1, cm2) -> cm2.getData().compareTo(cm1.getData()));
|
||||
final MergableCustomMetaData1 tribeNodeCustomMetaData = customMetaDatas.get(0);
|
||||
try (Releasable tribeNode = startTribeNode()) {
|
||||
assertNodes(ALL);
|
||||
putCustomMetaData(cluster1, customMetaData1);
|
||||
assertCustomMetaDataUpdated(internalCluster(), customMetaData1);
|
||||
putCustomMetaData(cluster2, customMetaData2);
|
||||
|
@ -530,7 +530,6 @@ public class TribeIT extends ESIntegTestCase {
|
|||
Collections.sort(mergedCustomMetaDataType1, (cm1, cm2) -> cm2.getData().compareTo(cm1.getData()));
|
||||
Collections.sort(mergedCustomMetaDataType2, (cm1, cm2) -> cm2.getData().compareTo(cm1.getData()));
|
||||
try (Releasable tribeNode = startTribeNode()) {
|
||||
assertNodes(ALL);
|
||||
// test putting multiple custom md types propagates to tribe
|
||||
putCustomMetaData(cluster1, firstCustomMetaDataType1);
|
||||
putCustomMetaData(cluster1, firstCustomMetaDataType2);
|
||||
|
@ -631,24 +630,6 @@ public class TribeIT extends ESIntegTestCase {
|
|||
});
|
||||
}
|
||||
|
||||
private static void assertNodes(Predicate<InternalTestCluster> filter) throws Exception {
|
||||
final Set<String> expectedNodes = Sets.newHashSet(internalCluster().getNodeNames());
|
||||
doWithAllClusters(filter, c -> {
|
||||
// Adds the tribe client node dedicated to this remote cluster
|
||||
for (String tribeNode : internalCluster().getNodeNames()) {
|
||||
expectedNodes.add(tribeNode + "/" + c.getClusterName());
|
||||
}
|
||||
// Adds the remote clusters' node names
|
||||
Collections.addAll(expectedNodes, c.getNodeNames());
|
||||
});
|
||||
|
||||
assertBusy(() -> {
|
||||
ClusterState state = client().admin().cluster().prepareState().setNodes(true).get().getState();
|
||||
Set<String> nodes = StreamSupport.stream(state.getNodes().spliterator(), false).map(DiscoveryNode::getName).collect(toSet());
|
||||
assertThat(nodes, containsInAnyOrder(expectedNodes.toArray()));
|
||||
});
|
||||
}
|
||||
|
||||
private static void doWithAllClusters(Consumer<InternalTestCluster> consumer) {
|
||||
doWithAllClusters(cluster -> cluster != null, consumer);
|
||||
}
|
||||
|
|
Binary file not shown.
Binary file not shown.
|
@ -165,6 +165,7 @@ configure(distributions) {
|
|||
from project(':core').configurations.runtime
|
||||
// delay adding the tools using closures, since they have not yet been configured, so no jar task exists yet
|
||||
from { project(':distribution:tools:java-version-checker').jar }
|
||||
from { project(':distribution:tools:plugin-cli').jar }
|
||||
}
|
||||
|
||||
modulesFiles = copySpec {
|
||||
|
|
|
@ -0,0 +1,32 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
apply plugin: 'elasticsearch.build'
|
||||
|
||||
dependencies {
|
||||
provided "org.elasticsearch:elasticsearch:${version}"
|
||||
testCompile "org.elasticsearch.test:framework:${version}"
|
||||
testCompile 'com.google.jimfs:jimfs:1.1'
|
||||
testCompile 'com.google.guava:guava:18.0'
|
||||
}
|
||||
|
||||
test {
|
||||
// TODO: find a way to add permissions for the tests in this module
|
||||
systemProperty 'tests.security.manager', 'false'
|
||||
}
|
|
@ -26,8 +26,8 @@ import org.apache.lucene.util.CollectionUtil;
|
|||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.bootstrap.JarHell;
|
||||
import org.elasticsearch.cli.ExitCodes;
|
||||
import org.elasticsearch.cli.EnvironmentAwareCommand;
|
||||
import org.elasticsearch.cli.ExitCodes;
|
||||
import org.elasticsearch.cli.Terminal;
|
||||
import org.elasticsearch.cli.UserException;
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
|
@ -19,15 +19,6 @@
|
|||
|
||||
package org.elasticsearch.plugins;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.AtomicMoveNotSupportedException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.StandardCopyOption;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
|
||||
import joptsimple.OptionSet;
|
||||
import joptsimple.OptionSpec;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
|
@ -38,6 +29,16 @@ import org.elasticsearch.cli.UserException;
|
|||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.FileAlreadyExistsException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.stream.Collectors;
|
||||
import java.util.stream.Stream;
|
||||
|
||||
import static org.elasticsearch.cli.Terminal.Verbosity.VERBOSE;
|
||||
|
||||
/**
|
||||
|
@ -101,19 +102,32 @@ class RemovePluginCommand extends EnvironmentAwareCommand {
|
|||
}
|
||||
|
||||
terminal.println(VERBOSE, "removing [" + pluginDir + "]");
|
||||
final Path tmpPluginDir = env.pluginsFile().resolve(".removing-" + pluginName);
|
||||
try {
|
||||
Files.move(pluginDir, tmpPluginDir, StandardCopyOption.ATOMIC_MOVE);
|
||||
} catch (final AtomicMoveNotSupportedException e) {
|
||||
/*
|
||||
* On a union file system if the plugin that we are removing is not installed on the
|
||||
* top layer then atomic move will not be supported. In this case, we fall back to a
|
||||
* non-atomic move.
|
||||
*/
|
||||
Files.move(pluginDir, tmpPluginDir);
|
||||
/*
|
||||
* We are going to create a marker file in the plugin directory that indicates that this plugin is in a state of removal. If the
|
||||
* removal fails, the existence of this marker file indicates that the plugin is in a garbage state. We check for existence of this
|
||||
* marker file during startup so that we do not start up with plugins in such a garbage state.
|
||||
*/
|
||||
final Path removing = pluginDir.resolve(".removing-" + pluginName);
|
||||
/*
|
||||
* Add the contents of the plugin directory before creating the marker file and adding it to the list of paths to be deleted so
|
||||
* that the marker file is the last file to be deleted.
|
||||
*/
|
||||
try (Stream<Path> paths = Files.list(pluginDir)) {
|
||||
pluginPaths.addAll(paths.collect(Collectors.toList()));
|
||||
}
|
||||
pluginPaths.add(tmpPluginDir);
|
||||
|
||||
try {
|
||||
Files.createFile(removing);
|
||||
} catch (final FileAlreadyExistsException e) {
|
||||
/*
|
||||
* We need to suppress the marker file already existing as we could be in this state if a previous removal attempt failed and
|
||||
* the user is attempting to remove the plugin again.
|
||||
*/
|
||||
terminal.println(VERBOSE, "marker file [" + removing + "] already exists");
|
||||
}
|
||||
// now add the marker file
|
||||
pluginPaths.add(removing);
|
||||
// finally, add the plugin directory
|
||||
pluginPaths.add(pluginDir);
|
||||
IOUtils.rm(pluginPaths.toArray(new Path[pluginPaths.size()]));
|
||||
|
||||
/*
|
||||
|
@ -124,8 +138,7 @@ class RemovePluginCommand extends EnvironmentAwareCommand {
|
|||
if (Files.exists(pluginConfigDir)) {
|
||||
final String message = String.format(
|
||||
Locale.ROOT,
|
||||
"-> preserving plugin config files [%s] in case of upgrade; "
|
||||
+ "delete manually if not needed",
|
||||
"-> preserving plugin config files [%s] in case of upgrade; delete manually if not needed",
|
||||
pluginConfigDir);
|
||||
terminal.println(message);
|
||||
}
|
|
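The marker file is only useful if startup refuses to run with a half-removed plugin. A minimal sketch of such a check, assuming the plugins directory layout used above; the class name, method name, and the place it would be called from are illustrative, not the actual bootstrap code:

-----------------------------------------
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.stream.Stream;

final class PluginRemovalCheck {

    // fail startup if any plugin directory still contains a ".removing-" marker,
    // which means a previous removal attempt did not complete
    static void checkForFailedPluginRemovals(final Path pluginsDirectory) throws IOException {
        try (Stream<Path> plugins = Files.list(pluginsDirectory)) {
            final Iterable<Path> iterable = plugins::iterator;
            for (final Path plugin : iterable) {
                if (Files.isDirectory(plugin) == false) {
                    continue;
                }
                final Path removing = plugin.resolve(".removing-" + plugin.getFileName());
                if (Files.exists(removing)) {
                    throw new IllegalStateException(
                            "found file [" + removing + "]; a plugin removal did not complete, re-run the remove command");
                }
            }
        }
    }

}
-----------------------------------------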
@ -34,8 +34,6 @@ import java.io.StringReader;
|
|||
import java.nio.file.DirectoryStream;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.hamcrest.CoreMatchers.containsString;
|
||||
import static org.hamcrest.CoreMatchers.not;
|
||||
|
@ -159,6 +157,13 @@ public class RemovePluginCommandTests extends ESTestCase {
|
|||
assertEquals("plugin name is required", e.getMessage());
|
||||
}
|
||||
|
||||
public void testRemoveWhenRemovingMarker() throws Exception {
|
||||
Files.createDirectory(env.pluginsFile().resolve("fake"));
|
||||
Files.createFile(env.pluginsFile().resolve("fake").resolve("plugin.jar"));
|
||||
Files.createFile(env.pluginsFile().resolve("fake").resolve(".removing-fake"));
|
||||
removePlugin("fake", home);
|
||||
}
|
||||
|
||||
private String expectedConfigDirPreservedMessage(final Path configDir) {
|
||||
return "-> preserving plugin config files [" + configDir + "] in case of upgrade; delete manually if not needed";
|
||||
}
|
|
@ -204,3 +204,5 @@ include::index-modules/slowlog.asciidoc[]
|
|||
include::index-modules/store.asciidoc[]
|
||||
|
||||
include::index-modules/translog.asciidoc[]
|
||||
|
||||
include::index-modules/index-sorting.asciidoc[]
|
||||
|
|
|
@ -121,7 +121,12 @@ The service 'elasticsearch-service-x64' has been installed.
|
|||
|
||||
NOTE: While a JRE can be used for the Elasticsearch service, due to its use of a client VM (as opposed to a server JVM which offers better performance for long-running applications) its usage is discouraged and a warning will be issued.
|
||||
|
||||
NOTE: Upgrading (or downgrading) JVM versions does not require the service to be reinstalled. However, upgrading across JVM types (e.g. JRE versus SE) is not supported, and does require the service to be reinstalled.
|
||||
NOTE: The system environment variable `JAVA_HOME` should be set to the path to
|
||||
the JDK installation that you want the service to use. If you upgrade the JDK,
|
||||
you are not required to reinstall the service, but you must set the value of
|
||||
the system environment variable `JAVA_HOME` to the path to the new JDK
|
||||
installation. However, upgrading across JVM types (e.g. JRE versus SE) is not
|
||||
supported, and does require the service to be reinstalled.
|
||||
|
||||
[[windows-service-settings]]
|
||||
[float]
|
||||
|
|
|
@ -171,7 +171,7 @@ As elasticsearch is using JUnit 4, using the `@Before` and `@After` annotations
|
|||
-----------------------------------------
|
||||
@Override
|
||||
protected Collection<Class<? extends Plugin>> nodePlugins() {
|
||||
return pluginList(CustomSuggesterPlugin.class);
|
||||
return Arrays.asList(CustomSuggesterPlugin.class);
|
||||
}
|
||||
-----------------------------------------
|
||||
|
||||
|
|
|
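For context, a minimal sketch of an integration test that registers the plugin this way; the class name and test body are illustrative only, and CustomSuggesterPlugin is assumed to be on the test classpath:

-----------------------------------------
import java.util.Arrays;
import java.util.Collection;

import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;

public class CustomSuggesterIT extends ESIntegTestCase {

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        // make the custom suggester plugin available on every node of the internal test cluster
        return Arrays.asList(CustomSuggesterPlugin.class);
    }

    public void testClusterStartsWithPlugin() {
        // if the plugin failed to load, the nodes would not have started and this would fail
        ensureGreen();
    }

}
-----------------------------------------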
@ -23,6 +23,7 @@ import org.elasticsearch.common.Strings;
|
|||
import org.elasticsearch.ingest.AbstractProcessor;
|
||||
import org.elasticsearch.ingest.IngestDocument;
|
||||
import org.elasticsearch.ingest.Processor;
|
||||
import org.elasticsearch.script.CompiledScript;
|
||||
import org.elasticsearch.script.ExecutableScript;
|
||||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.script.ScriptContext;
|
||||
|
@ -70,7 +71,8 @@ public final class ScriptProcessor extends AbstractProcessor {
|
|||
*/
|
||||
@Override
|
||||
public void execute(IngestDocument document) {
|
||||
ExecutableScript executableScript = scriptService.executable(script, ScriptContext.Standard.INGEST);
|
||||
CompiledScript compiledScript = scriptService.compile(script, ScriptContext.Standard.INGEST);
|
||||
ExecutableScript executableScript = scriptService.executable(compiledScript, script.getParams());
|
||||
executableScript.setNextVar("ctx", document.getSourceAndMetadata());
|
||||
executableScript.run();
|
||||
}
|
||||
|
|
|
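The same two-step pattern, pulled out as a minimal sketch; the example script source and the helper method are assumptions, while the ScriptService calls mirror the change above:

-----------------------------------------
// compiles the script and then binds per-document state through a fresh ExecutableScript,
// mirroring ScriptProcessor#execute above; scriptService and document are supplied by the caller
static void runOnDocument(ScriptService scriptService, IngestDocument document) {
    Script script = new Script("ctx.bytes_total = ctx.bytes_in + ctx.bytes_out");
    CompiledScript compiledScript = scriptService.compile(script, ScriptContext.Standard.INGEST);
    ExecutableScript executableScript = scriptService.executable(compiledScript, script.getParams());
    executableScript.setNextVar("ctx", document.getSourceAndMetadata());
    executableScript.run();
}
-----------------------------------------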
@ -24,6 +24,7 @@ import java.util.Map;
|
|||
|
||||
import org.elasticsearch.ingest.IngestDocument;
|
||||
import org.elasticsearch.ingest.RandomDocumentPicks;
|
||||
import org.elasticsearch.script.CompiledScript;
|
||||
import org.elasticsearch.script.ExecutableScript;
|
||||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
|
@ -46,7 +47,7 @@ public class ScriptProcessorTests extends ESTestCase {
|
|||
ScriptService scriptService = mock(ScriptService.class);
|
||||
Script script = new Script("_script");
|
||||
ExecutableScript executableScript = mock(ExecutableScript.class);
|
||||
when(scriptService.executable(any(Script.class), any())).thenReturn(executableScript);
|
||||
when(scriptService.executable(any(CompiledScript.class), any())).thenReturn(executableScript);
|
||||
|
||||
Map<String, Object> document = new HashMap<>();
|
||||
document.put("bytes_in", randomInt());
|
||||
|
|
|
@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
|
|||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.query.QueryParseContext;
|
||||
import org.elasticsearch.script.CompiledScript;
|
||||
import org.elasticsearch.script.ExecutableScript;
|
||||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
|
@ -71,7 +72,8 @@ public class TransportSearchTemplateAction extends HandledTransportAction<Search
|
|||
try {
|
||||
Script script = new Script(request.getScriptType(), TEMPLATE_LANG, request.getScript(),
|
||||
request.getScriptParams() == null ? Collections.emptyMap() : request.getScriptParams());
|
||||
ExecutableScript executable = scriptService.executable(script, SEARCH);
|
||||
CompiledScript compiledScript = scriptService.compile(script, SEARCH);
|
||||
ExecutableScript executable = scriptService.executable(compiledScript, script.getParams());
|
||||
|
||||
BytesReference source = (BytesReference) executable.run();
|
||||
response.setSource(source);
|
||||
|
|
|
@ -50,16 +50,4 @@ public class SimpleExecutableScript implements ExecutableScript {
|
|||
throw new IllegalArgumentException("Unsupported var [" + name + "]");
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Object unwrap(Object value) {
|
||||
// Some script engines (javascript) copy any maps they unwrap
|
||||
if (randomBoolean()) {
|
||||
if (value instanceof Map) {
|
||||
return new HashMap<>((Map<?, ?>) value);
|
||||
}
|
||||
}
|
||||
// Others just return the objects plain (painless)
|
||||
return value;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -59,6 +59,16 @@
|
|||
|
||||
- match: { hits.total: 10 }
|
||||
|
||||
---
|
||||
"Verify that we can still find things with the template":
|
||||
- do:
|
||||
search_template:
|
||||
body:
|
||||
id: test_search_template
|
||||
params:
|
||||
f1: v5_old
|
||||
- match: { hits.total: 1 }
|
||||
|
||||
---
|
||||
"Verify custom cluster metadata still exists during upgrade":
|
||||
- do:
|
||||
|
|
|
@ -1,15 +1,15 @@
|
|||
---
|
||||
"Index data and search on the old cluster":
|
||||
- do:
|
||||
indices.create:
|
||||
"Index data, search, and create things in the cluster state that we'll validate are there after the ugprade":
|
||||
- do:
|
||||
indices.create:
|
||||
index: test_index
|
||||
body:
|
||||
settings:
|
||||
index:
|
||||
number_of_replicas: 0
|
||||
|
||||
- do:
|
||||
bulk:
|
||||
- do:
|
||||
bulk:
|
||||
refresh: true
|
||||
body:
|
||||
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
|
||||
|
@ -23,18 +23,16 @@
|
|||
- '{"index": {"_index": "test_index", "_type": "test_type"}}'
|
||||
- '{"f1": "v5_old", "f2": 4}'
|
||||
|
||||
- do:
|
||||
indices.flush:
|
||||
- do:
|
||||
indices.flush:
|
||||
index: test_index
|
||||
|
||||
- do:
|
||||
search:
|
||||
- do:
|
||||
search:
|
||||
index: test_index
|
||||
|
||||
- match: { hits.total: 5 }
|
||||
- match: { hits.total: 5 }
|
||||
|
||||
---
|
||||
"Add stuff to cluster state so that we can verify that it remains to exist during and after the rolling upgrade":
|
||||
- do:
|
||||
snapshot.create_repository:
|
||||
repository: my_repo
|
||||
|
@ -54,3 +52,20 @@
|
|||
]
|
||||
}
|
||||
- match: { "acknowledged": true }
|
||||
|
||||
- do:
|
||||
put_template:
|
||||
id: test_search_template
|
||||
body:
|
||||
query:
|
||||
match:
|
||||
f1: "{{f1}}"
|
||||
- match: { acknowledged: true }
|
||||
|
||||
- do:
|
||||
search_template:
|
||||
body:
|
||||
id: test_search_template
|
||||
params:
|
||||
f1: v5_old
|
||||
- match: { hits.total: 1 }
|
||||
|
|
|
@ -36,6 +36,16 @@
|
|||
|
||||
- match: { hits.total: 15 } # 10 docs from previous clusters plus 5 new docs
|
||||
|
||||
---
|
||||
"Verify that we can still find things with the template":
|
||||
- do:
|
||||
search_template:
|
||||
body:
|
||||
id: test_search_template
|
||||
params:
|
||||
f1: v5_old
|
||||
- match: { hits.total: 1 }
|
||||
|
||||
---
|
||||
"Verify custom cluster metadata still exists after rolling upgrade":
|
||||
- do:
|
||||
|
|
|
@ -0,0 +1,197 @@
|
|||
import org.elasticsearch.gradle.LoggedExec
|
||||
import org.elasticsearch.gradle.VersionProperties
|
||||
import org.apache.tools.ant.taskdefs.condition.Os
|
||||
|
||||
import java.nio.charset.StandardCharsets
|
||||
import java.nio.file.Files
|
||||
import java.util.stream.Stream
|
||||
|
||||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
apply plugin: 'war'
|
||||
apply plugin: 'elasticsearch.build'
|
||||
apply plugin: 'elasticsearch.rest-test'
|
||||
|
||||
final String wildflyVersion = '10.0.0.Final'
|
||||
final String wildflyDir = "${buildDir}/wildfly"
|
||||
final String wildflyInstall = "${buildDir}/wildfly/wildfly-${wildflyVersion}"
|
||||
// TODO: use ephemeral ports
|
||||
final int portOffset = 30000
|
||||
final int managementPort = 9990 + portOffset
|
||||
// we skip these tests on Windows so we do not need to worry about compatibility here
|
||||
final String stopWildflyCommand = "${wildflyInstall}/bin/jboss-cli.sh --controller=localhost:${managementPort} --connect command=:shutdown"
|
||||
|
||||
repositories {
|
||||
mavenCentral()
|
||||
// the Wildfly distribution is not available via a repository, so we fake an Ivy repository on top of the download site
|
||||
ivy {
|
||||
url "http://download.jboss.org"
|
||||
layout 'pattern', {
|
||||
artifact 'wildfly/[revision]/[module]-[revision].[ext]'
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
configurations {
|
||||
wildfly
|
||||
}
|
||||
|
||||
dependencies {
|
||||
providedCompile 'javax.enterprise:cdi-api:1.2'
|
||||
providedCompile 'org.jboss.spec.javax.annotation:jboss-annotations-api_1.2_spec:1.0.0.Final'
|
||||
providedCompile 'org.jboss.spec.javax.ws.rs:jboss-jaxrs-api_2.0_spec:1.0.0.Final'
|
||||
compile ('org.jboss.resteasy:resteasy-jackson2-provider:3.0.19.Final') {
|
||||
exclude module: 'jackson-annotations'
|
||||
exclude module: 'jackson-core'
|
||||
exclude module: 'jackson-databind'
|
||||
exclude module: 'jackson-jaxrs-json-provider'
|
||||
}
|
||||
compile "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}"
|
||||
compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}"
|
||||
compile "com.fasterxml.jackson.core:jackson-databind:${versions.jackson}"
|
||||
compile "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}"
|
||||
compile "com.fasterxml.jackson.jaxrs:jackson-jaxrs-base:${versions.jackson}"
|
||||
compile "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}"
|
||||
compile "org.apache.logging.log4j:log4j-api:${versions.log4j}"
|
||||
compile "org.apache.logging.log4j:log4j-core:${versions.log4j}"
|
||||
compile project(path: ':client:transport', configuration: 'runtime')
|
||||
wildfly "org.jboss:wildfly:${wildflyVersion}@zip"
|
||||
testCompile "org.elasticsearch.test:framework:${VersionProperties.elasticsearch}"
|
||||
}
|
||||
|
||||
task unzipWildfly(type: Sync) {
|
||||
into wildflyDir
|
||||
from { zipTree(configurations.wildfly.singleFile) }
|
||||
}
|
||||
|
||||
task deploy(type: Copy) {
|
||||
dependsOn unzipWildfly, war
|
||||
from war
|
||||
into "${wildflyInstall}/standalone/deployments"
|
||||
}
|
||||
|
||||
task writeElasticsearchProperties {
|
||||
onlyIf { !Os.isFamily(Os.FAMILY_WINDOWS) && project.rootProject.ext.javaVersion == JavaVersion.VERSION_1_8 }
|
||||
dependsOn 'integTestCluster#wait', deploy
|
||||
doLast {
|
||||
final File elasticsearchProperties = file("${wildflyInstall}/standalone/configuration/elasticsearch.properties")
|
||||
elasticsearchProperties.write(
|
||||
[
|
||||
"transport.uri=${-> integTest.getNodes().get(0).transportUri()}",
|
||||
"cluster.name=${-> integTest.getNodes().get(0).clusterName}"
|
||||
].join("\n"))
|
||||
}
|
||||
}
|
||||
|
||||
// the default configuration ships with IPv6 disabled but our cluster could be bound to IPv6 if the host supports it
|
||||
task enableIPv6 {
|
||||
dependsOn unzipWildfly
|
||||
doLast {
|
||||
final File standaloneConf = file("${wildflyInstall}/bin/standalone.conf")
|
||||
final List<String> lines =
|
||||
Files.readAllLines(standaloneConf.toPath())
|
||||
.collect { line -> line.replace("-Djava.net.preferIPv4Stack=true", "-Djava.net.preferIPv4Stack=false") }
|
||||
standaloneConf.write(lines.join("\n"))
|
||||
}
|
||||
}
|
||||
|
||||
task startWildfly {
|
||||
dependsOn enableIPv6, writeElasticsearchProperties
|
||||
doFirst {
|
||||
// we skip these tests on Windows so we do not need to worry about compatibility here
|
||||
final File script = new File(project.buildDir, "wildfly/wildfly.killer.sh")
|
||||
script.setText(
|
||||
["function shutdown {",
|
||||
" ${stopWildflyCommand}",
|
||||
"}",
|
||||
"trap shutdown EXIT",
|
||||
// will wait indefinitely for input, but we never pass input, and the pipe is only closed when the build dies
|
||||
"read line"].join('\n'), 'UTF-8')
|
||||
final ProcessBuilder pb = new ProcessBuilder("bash", script.absolutePath)
|
||||
pb.start()
|
||||
}
|
||||
doLast {
|
||||
// we skip these tests on Windows so we do not need to worry about compatibility here
|
||||
final ProcessBuilder pb =
|
||||
new ProcessBuilder("${wildflyInstall}/bin/standalone.sh", "-Djboss.socket.binding.port-offset=${portOffset}")
|
||||
final Process process = pb.start()
|
||||
new BufferedReader(new InputStreamReader(process.getInputStream())).withReader { br ->
|
||||
String line
|
||||
while ((line = br.readLine()) != null) {
|
||||
if (line.matches(".*WildFly Full \\d+\\.\\d+\\.\\d+\\.Final \\(WildFly Core \\d+\\.\\d+\\.\\d+\\.Final\\) started.*")) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
task configureTransportClient(type: LoggedExec) {
|
||||
dependsOn startWildfly
|
||||
// we skip these tests on Windows so we do not need to worry about compatibility here
|
||||
commandLine "${wildflyInstall}/bin/jboss-cli.sh",
|
||||
"--controller=localhost:${managementPort}",
|
||||
"--connect",
|
||||
"--command=/system-property=elasticsearch.properties:add(value=\${jboss.server.config.dir}/elasticsearch.properties)"
|
||||
}
|
||||
|
||||
task stopWildfly(type: LoggedExec) {
|
||||
commandLine stopWildflyCommand.split(' ')
|
||||
}
|
||||
|
||||
if (!Os.isFamily(Os.FAMILY_WINDOWS) && project.rootProject.ext.javaVersion == JavaVersion.VERSION_1_8) {
|
||||
integTestRunner.dependsOn(configureTransportClient)
|
||||
final TaskExecutionAdapter logDumpListener = new TaskExecutionAdapter() {
|
||||
@Override
|
||||
void afterExecute(final Task task, final TaskState state) {
|
||||
if (state.failure != null) {
|
||||
final File logFile = new File(wildflyInstall, "standalone/log/server.log")
|
||||
println("\nWildfly server log (from ${logFile}):")
|
||||
println('-----------------------------------------')
|
||||
final Stream<String> stream = Files.lines(logFile.toPath(), StandardCharsets.UTF_8)
|
||||
try {
|
||||
for (String line : stream) {
|
||||
println(line)
|
||||
}
|
||||
} finally {
|
||||
stream.close()
|
||||
}
|
||||
println('=========================================')
|
||||
}
|
||||
}
|
||||
}
|
||||
integTestRunner.doFirst {
|
||||
project.gradle.addListener(logDumpListener)
|
||||
}
|
||||
integTestRunner.doLast {
|
||||
project.gradle.removeListener(logDumpListener)
|
||||
}
|
||||
integTestRunner.finalizedBy(stopWildfly)
|
||||
} else {
|
||||
integTest.enabled = false
|
||||
}
|
||||
|
||||
check.dependsOn(integTest)
|
||||
|
||||
test.enabled = false
|
||||
|
||||
dependencyLicenses.enabled = false
|
||||
|
||||
thirdPartyAudit.enabled = false
|
|
@ -0,0 +1,87 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.wildfly.model;
|
||||
|
||||
import com.fasterxml.jackson.annotation.JsonProperty;
|
||||
|
||||
import javax.ws.rs.Consumes;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
|
||||
import java.util.List;
|
||||
|
||||
@Consumes(MediaType.APPLICATION_JSON)
|
||||
public class Employee {
|
||||
|
||||
@JsonProperty(value = "first_name")
|
||||
private String firstName;
|
||||
|
||||
public String getFirstName() {
|
||||
return firstName;
|
||||
}
|
||||
|
||||
public void setFirstName(String firstName) {
|
||||
this.firstName = firstName;
|
||||
}
|
||||
|
||||
@JsonProperty(value = "last_name")
|
||||
private String lastName;
|
||||
|
||||
public String getLastName() {
|
||||
return lastName;
|
||||
}
|
||||
|
||||
public void setLastName(String lastName) {
|
||||
this.lastName = lastName;
|
||||
}
|
||||
|
||||
@JsonProperty(value = "age")
|
||||
private int age;
|
||||
|
||||
public int getAge() {
|
||||
return age;
|
||||
}
|
||||
|
||||
public void setAge(int age) {
|
||||
this.age = age;
|
||||
}
|
||||
|
||||
@JsonProperty(value = "about")
|
||||
private String about;
|
||||
|
||||
public String getAbout() {
|
||||
return about;
|
||||
}
|
||||
|
||||
public void setAbout(String about) {
|
||||
this.about = about;
|
||||
}
|
||||
|
||||
@JsonProperty(value = "interests")
|
||||
private List<String> interests;
|
||||
|
||||
public List<String> getInterests() {
|
||||
return interests;
|
||||
}
|
||||
|
||||
public void setInterests(List<String> interests) {
|
||||
this.interests = interests;
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,36 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.wildfly.transport;
|
||||
|
||||
import javax.ws.rs.ApplicationPath;
|
||||
import javax.ws.rs.core.Application;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.Set;
|
||||
|
||||
@ApplicationPath("/transport")
|
||||
public class TransportClientActivator extends Application {
|
||||
|
||||
@Override
|
||||
public Set<Class<?>> getClasses() {
|
||||
return Collections.singleton(TransportClientEmployeeResource.class);
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,106 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.wildfly.transport;
|
||||
|
||||
import org.elasticsearch.action.get.GetResponse;
|
||||
import org.elasticsearch.action.index.IndexResponse;
|
||||
import org.elasticsearch.client.transport.TransportClient;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.wildfly.model.Employee;
|
||||
|
||||
import javax.inject.Inject;
|
||||
import javax.ws.rs.GET;
|
||||
import javax.ws.rs.PUT;
|
||||
import javax.ws.rs.Path;
|
||||
import javax.ws.rs.PathParam;
|
||||
import javax.ws.rs.Produces;
|
||||
import javax.ws.rs.core.MediaType;
|
||||
import javax.ws.rs.core.Response;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
import java.net.URISyntaxException;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
|
||||
|
||||
@Path("/employees")
|
||||
public class TransportClientEmployeeResource {
|
||||
|
||||
@Inject
|
||||
private TransportClient client;
|
||||
|
||||
@GET
|
||||
@Path("/{id}")
|
||||
@Produces(MediaType.APPLICATION_JSON)
|
||||
public Response getEmployeeById(final @PathParam("id") Long id) {
|
||||
Objects.requireNonNull(id);
|
||||
final GetResponse response = client.prepareGet("megacorp", "employee", Long.toString(id)).get();
|
||||
if (response.isExists()) {
|
||||
final Map<String, Object> source = response.getSource();
|
||||
final Employee employee = new Employee();
|
||||
employee.setFirstName((String) source.get("first_name"));
|
||||
employee.setLastName((String) source.get("last_name"));
|
||||
employee.setAge((Integer) source.get("age"));
|
||||
employee.setAbout((String) source.get("about"));
|
||||
@SuppressWarnings("unchecked") final List<String> interests = (List<String>) source.get("interests");
|
||||
employee.setInterests(interests);
|
||||
return Response.ok(employee).build();
|
||||
} else {
|
||||
return Response.status(Response.Status.NOT_FOUND).build();
|
||||
}
|
||||
}
|
||||
|
||||
@PUT
|
||||
@Path("/{id}")
|
||||
@Produces(MediaType.APPLICATION_JSON)
|
||||
public Response putEmployeeById(final @PathParam("id") Long id, final Employee employee) throws URISyntaxException, IOException {
|
||||
Objects.requireNonNull(id);
|
||||
Objects.requireNonNull(employee);
|
||||
try (XContentBuilder builder = jsonBuilder()) {
|
||||
builder.startObject();
|
||||
{
|
||||
builder.field("first_name", employee.getFirstName());
|
||||
builder.field("last_name", employee.getLastName());
|
||||
builder.field("age", employee.getAge());
|
||||
builder.field("about", employee.getAbout());
|
||||
if (employee.getInterests() != null) {
|
||||
builder.startArray("interests");
|
||||
{
|
||||
for (final String interest : employee.getInterests()) {
|
||||
builder.value(interest);
|
||||
}
|
||||
}
|
||||
builder.endArray();
|
||||
}
|
||||
}
|
||||
builder.endObject();
|
||||
final IndexResponse response = client.prepareIndex("megacorp", "employee", Long.toString(id)).setSource(builder).get();
|
||||
if (response.status().getStatus() == 201) {
|
||||
return Response.created(new URI("/employees/" + id)).build();
|
||||
} else {
|
||||
return Response.ok().build();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,72 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.wildfly.transport;
|
||||
|
||||
import org.elasticsearch.client.transport.TransportClient;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.io.PathUtils;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.transport.client.PreBuiltTransportClient;
|
||||
|
||||
import javax.enterprise.inject.Produces;
|
||||
|
||||
import java.io.File;
|
||||
import java.io.FileInputStream;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.net.InetAddress;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.Collections;
|
||||
import java.util.Properties;
|
||||
|
||||
@SuppressWarnings("unused")
|
||||
public final class TransportClientProducer {
|
||||
|
||||
@Produces
|
||||
public TransportClient createTransportClient() throws IOException {
|
||||
final String elasticsearchProperties = System.getProperty("elasticsearch.properties");
|
||||
final Properties properties = new Properties();
|
||||
|
||||
final String transportUri;
|
||||
final String clusterName;
|
||||
try (InputStream is = Files.newInputStream(getPath(elasticsearchProperties))) {
|
||||
properties.load(is);
|
||||
transportUri = properties.getProperty("transport.uri");
|
||||
clusterName = properties.getProperty("cluster.name");
|
||||
}
|
||||
|
||||
final int lastColon = transportUri.lastIndexOf(':');
|
||||
final String host = transportUri.substring(0, lastColon);
|
||||
final int port = Integer.parseInt(transportUri.substring(lastColon + 1));
|
||||
final Settings settings = Settings.builder().put("cluster.name", clusterName).build();
|
||||
final TransportClient transportClient = new PreBuiltTransportClient(settings, Collections.emptyList());
|
||||
transportClient.addTransportAddress(new TransportAddress(InetAddress.getByName(host), port));
|
||||
return transportClient;
|
||||
}
|
||||
|
||||
@SuppressForbidden(reason = "get path not configured in environment")
|
||||
private Path getPath(final String elasticsearchProperties) {
|
||||
return PathUtils.get(elasticsearchProperties);
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,28 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.wildfly.transport;
|
||||
|
||||
import org.jboss.resteasy.plugins.providers.jackson.ResteasyJackson2Provider;
|
||||
|
||||
import javax.ws.rs.ext.Provider;
|
||||
|
||||
@Provider
|
||||
public class TransportJacksonJsonProvider extends ResteasyJackson2Provider {
|
||||
}
|
|
@ -0,0 +1,9 @@
|
|||
status = error
|
||||
|
||||
appender.console.type = Console
|
||||
appender.console.name = console
|
||||
appender.console.layout.type = PatternLayout
|
||||
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
|
||||
|
||||
rootLogger.level = info
|
||||
rootLogger.appenderRef.console.ref = console
|
|
@ -0,0 +1,26 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!--
|
||||
Licensed to Elasticsearch under one or more contributor
|
||||
license agreements. See the NOTICE file distributed with
|
||||
this work for additional information regarding copyright
|
||||
ownership. Elasticsearch licenses this file to you under
|
||||
the Apache License, Version 2.0 (the "License"); you may
|
||||
not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an
|
||||
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations
|
||||
under the License.
|
||||
-->
|
||||
<!-- Marker file indicating CDI should be enabled -->
|
||||
<beans xmlns="http://xmlns.jcp.org/xml/ns/javaee" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
|
||||
xsi:schemaLocation="
|
||||
http://xmlns.jcp.org/xml/ns/javaee
|
||||
http://xmlns.jcp.org/xml/ns/javaee/beans_1_1.xsd"
|
||||
bean-discovery-mode="all">
|
||||
</beans>
|
|
@ -0,0 +1,10 @@
|
|||
<jboss-deployment-structure>
|
||||
<deployment>
|
||||
<exclusions>
|
||||
<module name="com.fasterxml.jackson.core.jackson-core" />
|
||||
<module name="com.fasterxml.jackson.core.jackson-databind" />
|
||||
<module name="com.fasterxml.jackson.jaxrs.jackson-jaxrs-json-provider" />
|
||||
<module name="org.jboss.resteasy.resteasy-jackson2-provider" />
|
||||
</exclusions>
|
||||
</deployment>
|
||||
</jboss-deployment-structure>
|
|
@ -0,0 +1,101 @@
|
|||
package org.elasticsearch.wildfly;/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
import org.apache.http.client.methods.CloseableHttpResponse;
|
||||
import org.apache.http.client.methods.HttpGet;
|
||||
import org.apache.http.client.methods.HttpPut;
|
||||
import org.apache.http.entity.ContentType;
|
||||
import org.apache.http.entity.StringEntity;
|
||||
import org.apache.http.impl.client.CloseableHttpClient;
|
||||
import org.apache.http.impl.client.HttpClientBuilder;
|
||||
import org.apache.lucene.util.LuceneTestCase;
|
||||
import org.elasticsearch.Build;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterModule;
|
||||
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentParser;
|
||||
import org.elasticsearch.common.xcontent.json.JsonXContent;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.net.URI;
|
||||
import java.net.URISyntaxException;
|
||||
import java.util.List;
|
||||
import java.util.Locale;
|
||||
import java.util.Map;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
|
||||
import static org.hamcrest.Matchers.containsInAnyOrder;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
import static org.hamcrest.Matchers.instanceOf;
|
||||
|
||||
public class WildflyIT extends LuceneTestCase {
|
||||
|
||||
public void testTransportClient() throws URISyntaxException, IOException {
|
||||
try (CloseableHttpClient client = HttpClientBuilder.create().build()) {
|
||||
final String str = String.format(
|
||||
Locale.ROOT,
|
||||
"http://localhost:38080/wildfly-%s%s/transport/employees/1",
|
||||
Version.CURRENT,
|
||||
Build.CURRENT.isSnapshot() ? "-SNAPSHOT" : "");
|
||||
final HttpPut put = new HttpPut(new URI(str));
|
||||
final String body;
|
||||
try (XContentBuilder builder = jsonBuilder()) {
|
||||
builder.startObject();
|
||||
{
|
||||
builder.field("first_name", "John");
|
||||
builder.field("last_name", "Smith");
|
||||
builder.field("age", 25);
|
||||
builder.field("about", "I love to go rock climbing");
|
||||
builder.startArray("interests");
|
||||
{
|
||||
builder.value("sports");
|
||||
builder.value("music");
|
||||
}
|
||||
builder.endArray();
|
||||
}
|
||||
builder.endObject();
|
||||
body = builder.string();
|
||||
}
|
||||
put.setEntity(new StringEntity(body, ContentType.APPLICATION_JSON));
|
||||
try (CloseableHttpResponse response = client.execute(put)) {
|
||||
assertThat(response.getStatusLine().getStatusCode(), equalTo(201));
|
||||
}
|
||||
|
||||
final HttpGet get = new HttpGet(new URI(str));
|
||||
try (
|
||||
CloseableHttpResponse response = client.execute(get);
|
||||
XContentParser parser =
|
||||
JsonXContent.jsonXContent.createParser(
|
||||
new NamedXContentRegistry(ClusterModule.getNamedXWriteables()),
|
||||
response.getEntity().getContent())) {
|
||||
final Map<String, Object> map = parser.map();
|
||||
assertThat(map.get("first_name"), equalTo("John"));
|
||||
assertThat(map.get("last_name"), equalTo("Smith"));
|
||||
assertThat(map.get("age"), equalTo(25));
|
||||
assertThat(map.get("about"), equalTo("I love to go rock climbing"));
|
||||
final Object interests = map.get("interests");
|
||||
assertThat(interests, instanceOf(List.class));
|
||||
@SuppressWarnings("unchecked") final List<String> interestsAsList = (List<String>) interests;
|
||||
assertThat(interestsAsList, containsInAnyOrder("sports", "music"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
|
@ -21,6 +21,7 @@ List projects = [
|
|||
'distribution:deb',
|
||||
'distribution:rpm',
|
||||
'distribution:tools:java-version-checker',
|
||||
'distribution:tools:plugin-cli',
|
||||
'test:framework',
|
||||
'test:fixtures:example-fixture',
|
||||
'test:fixtures:hdfs-fixture',
|
||||
|
@ -56,11 +57,12 @@ List projects = [
|
|||
'plugins:repository-s3',
|
||||
'plugins:jvm-example',
|
||||
'plugins:store-smb',
|
||||
'qa:auto-create-index',
|
||||
'qa:backwards-5.0',
|
||||
'qa:evil-tests',
|
||||
'qa:multi-cluster-search',
|
||||
'qa:no-bootstrap-tests',
|
||||
'qa:rolling-upgrade',
|
||||
'qa:multi-cluster-search',
|
||||
'qa:smoke-test-client',
|
||||
'qa:smoke-test-http',
|
||||
'qa:smoke-test-ingest-with-all-dependencies',
|
||||
|
@ -70,6 +72,7 @@ List projects = [
|
|||
'qa:smoke-test-reindex-with-painless',
|
||||
'qa:smoke-test-tribe-node',
|
||||
'qa:vagrant',
|
||||
'qa:wildfly'
|
||||
]
|
||||
|
||||
boolean isEclipse = System.getProperty("eclipse.launcher") != null || gradle.startParameter.taskNames.contains('eclipse') || gradle.startParameter.taskNames.contains('cleanEclipse')
|
||||
|
|
|
@ -484,7 +484,7 @@ public abstract class IndexShardTestCase extends ESTestCase {
|
|||
final Engine.Index index;
|
||||
if (shard.routingEntry().primary()) {
|
||||
index = shard.prepareIndexOnPrimary(
|
||||
SourceToParse.source(SourceToParse.Origin.PRIMARY, shard.shardId().getIndexName(), type, id, new BytesArray(source),
|
||||
SourceToParse.source(shard.shardId().getIndexName(), type, id, new BytesArray(source),
|
||||
xContentType),
|
||||
Versions.MATCH_ANY,
|
||||
VersionType.INTERNAL,
|
||||
|
@ -492,7 +492,7 @@ public abstract class IndexShardTestCase extends ESTestCase {
|
|||
false);
|
||||
} else {
|
||||
index = shard.prepareIndexOnReplica(
|
||||
SourceToParse.source(SourceToParse.Origin.PRIMARY, shard.shardId().getIndexName(), type, id, new BytesArray(source),
|
||||
SourceToParse.source(shard.shardId().getIndexName(), type, id, new BytesArray(source),
|
||||
xContentType),
|
||||
randomInt(1 << 10), 1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
|
||||
}
|
||||
|
|
|
@ -56,9 +56,4 @@ public abstract class TestScript implements ExecutableScript{
|
|||
Objects.requireNonNull(_superset_freq, "_superset_freq");
|
||||
Objects.requireNonNull(_superset_size, "_superset_size");
|
||||
}
|
||||
|
||||
@Override
|
||||
public Double unwrap(Object value) {
|
||||
return ((Number) value).doubleValue();
|
||||
}
|
||||
}
|
||||
|
|